envio 3.0.0-alpha.19 → 3.0.0-alpha.19-main-node-pg-client

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "envio",
3
- "version": "3.0.0-alpha.19",
3
+ "version": "3.0.0-alpha.19-main-node-pg-client",
4
4
  "type": "module",
5
5
  "description": "A latency and sync speed optimized, developer friendly blockchain data indexer.",
6
6
  "bin": "./bin.mjs",
@@ -61,13 +61,13 @@
61
61
  "ink": "6.8.0",
62
62
  "ink-big-text": "2.0.0",
63
63
  "ink-spinner": "5.0.0",
64
- "postgres": "3.4.8",
64
+ "pg": "8.16.0",
65
65
  "tsx": "4.21.0"
66
66
  },
67
67
  "optionalDependencies": {
68
- "envio-linux-x64": "3.0.0-alpha.19",
69
- "envio-linux-arm64": "3.0.0-alpha.19",
70
- "envio-darwin-x64": "3.0.0-alpha.19",
71
- "envio-darwin-arm64": "3.0.0-alpha.19"
68
+ "envio-linux-x64": "3.0.0-alpha.19-main-node-pg-client",
69
+ "envio-linux-arm64": "3.0.0-alpha.19-main-node-pg-client",
70
+ "envio-darwin-x64": "3.0.0-alpha.19-main-node-pg-client",
71
+ "envio-darwin-arm64": "3.0.0-alpha.19-main-node-pg-client"
72
72
  }
73
73
  }
package/src/Config.res CHANGED
@@ -385,6 +385,7 @@ let parseEntitiesFromJson = (
385
385
 
386
386
  let table = Table.mkTable(
387
387
  entityName,
388
+ ~stmtId=index->Belt.Int.toString,
388
389
  ~fields=Array.concat(fields, derivedFields),
389
390
  ~compositeIndices,
390
391
  )
@@ -41,7 +41,7 @@ var schema = S$RescriptSchema.schema(function (s) {
41
41
 
42
42
  var rowsSchema = S$RescriptSchema.array(schema);
43
43
 
44
- var table = Table.mkTable(name, undefined, [
44
+ var table = Table.mkTable(name, undefined, undefined, [
45
45
  Table.mkField("id", "String", S$RescriptSchema.string, undefined, undefined, undefined, true, undefined, undefined),
46
46
  Table.mkField("chain_id", "Int32", S$RescriptSchema.$$int, undefined, undefined, undefined, undefined, undefined, undefined),
47
47
  Table.mkField("registering_event_block_number", "Int32", S$RescriptSchema.$$int, undefined, undefined, undefined, undefined, undefined, undefined),
@@ -320,7 +320,7 @@ function parseEntitiesFromJson(entitiesJson, enumConfigsByName) {
320
320
  };
321
321
  }));
322
322
  }));
323
- var table = Table.mkTable(entityName, compositeIndices, Belt_Array.concat(fields, derivedFields));
323
+ var table = Table.mkTable(entityName, String(index), compositeIndices, Belt_Array.concat(fields, derivedFields));
324
324
  var schema = S$RescriptSchema.schema(function (s) {
325
325
  var dict = {};
326
326
  Belt_Array.forEach(entityJson.properties, (function (prop) {
package/src/Env.res CHANGED
@@ -109,7 +109,7 @@ module Db = {
109
109
  )
110
110
  let ssl = envSafe->EnvSafe.get(
111
111
  "ENVIO_PG_SSL_MODE",
112
- Postgres.sslOptionsSchema,
112
+ Pg.sslOptionsSchema,
113
113
  //this is a dev fallback option for local deployments, shouldn't run in the prod env
114
114
  //the SSL modes should be provided as string otherwise as 'require' | 'allow' | 'prefer' | 'verify-full'
115
115
  ~devFallback=Bool(false),
package/src/Env.res.mjs CHANGED
@@ -1,8 +1,8 @@
1
1
  // Generated by ReScript, PLEASE EDIT WITH CARE
2
2
 
3
+ import * as Pg from "./bindings/Pg.res.mjs";
3
4
  import * as EnvSafe from "rescript-envsafe/src/EnvSafe.res.mjs";
4
5
  import * as Logging from "./Logging.res.mjs";
5
- import * as Postgres from "./bindings/Postgres.res.mjs";
6
6
  import * as Belt_Option from "rescript/lib/es6/belt_Option.js";
7
7
  import * as HyperSyncClient from "./sources/HyperSyncClient.res.mjs";
8
8
  import * as S$RescriptSchema from "rescript-schema/src/S.res.mjs";
@@ -82,7 +82,7 @@ var database = EnvSafe.get(envSafe, "ENVIO_PG_DATABASE", S$RescriptSchema.string
82
82
 
83
83
  var publicSchema = EnvSafe.get(envSafe, "ENVIO_PG_SCHEMA", S$RescriptSchema.string, undefined, EnvSafe.get(envSafe, "ENVIO_PG_PUBLIC_SCHEMA", S$RescriptSchema.string, undefined, "public", undefined, undefined), undefined, undefined);
84
84
 
85
- var ssl = EnvSafe.get(envSafe, "ENVIO_PG_SSL_MODE", Postgres.sslOptionsSchema, undefined, undefined, false, undefined);
85
+ var ssl = EnvSafe.get(envSafe, "ENVIO_PG_SSL_MODE", Pg.sslOptionsSchema, undefined, undefined, false, undefined);
86
86
 
87
87
  var maxConnections = EnvSafe.get(envSafe, "ENVIO_PG_MAX_CONNECTIONS", S$RescriptSchema.$$int, undefined, 2, undefined, undefined);
88
88
 
@@ -148,7 +148,7 @@ var effectCacheItemRowsSchema = S$RescriptSchema.array(S$RescriptSchema.schema(f
148
148
  }));
149
149
 
150
150
  function makeCacheTable(effectName) {
151
- return Table.mkTable(cacheTablePrefix + effectName, undefined, [
151
+ return Table.mkTable(cacheTablePrefix + effectName, undefined, undefined, [
152
152
  Table.mkField("id", "String", S$RescriptSchema.string, undefined, undefined, undefined, true, undefined, undefined),
153
153
  Table.mkField("output", "Json", cacheOutputSchema, undefined, undefined, true, undefined, undefined, undefined)
154
154
  ]);
package/src/PgStorage.res CHANGED
@@ -1,10 +1,7 @@
1
1
  let getCacheRowCountFnName = "get_cache_row_count"
2
2
 
3
- // Only needed for some old tests
4
- // Remove @genType in the future
5
- @genType
6
3
  let makeClient = () => {
7
- Postgres.makeSql(
4
+ Pg.makePool(
8
5
  ~config={
9
6
  host: Env.Db.host,
10
7
  port: Env.Db.port,
@@ -12,15 +9,7 @@ let makeClient = () => {
12
9
  password: Env.Db.password,
13
10
  database: Env.Db.database,
14
11
  ssl: Env.Db.ssl,
15
- // TODO: think how we want to pipe these logs to pino.
16
- onnotice: ?(
17
- Env.userLogLevel == Some(#warn) || Env.userLogLevel == Some(#error)
18
- ? None
19
- : Some(_str => ())
20
- ),
21
- transform: {undefined: Null},
22
12
  max: Env.Db.maxConnections,
23
- // debug: (~connection, ~query, ~params as _, ~types as _) => Js.log2(connection, query),
24
13
  },
25
14
  )
26
15
  }
@@ -163,6 +152,7 @@ let getEntityHistory = (~entityConfig: Internal.entityConfig): EntityHistory.pgE
163
152
  //ignore composite indices
164
153
  let table = Table.mkTable(
165
154
  historyTableName,
155
+ ~stmtId="h" ++ entityConfig.index->Belt.Int.toString,
166
156
  ~fields=dataFields->Belt.Array.concat([checkpointIdField, actionField]),
167
157
  )
168
158
 
@@ -387,8 +377,53 @@ VALUES${placeholders.contents}` ++
387
377
  // Constants for chunking
388
378
  let maxItemsPerQuery = 500
389
379
 
390
- let makeTableBatchSetQuery = (~pgSchema, ~table: Table.table, ~itemSchema: S.t<'item>) => {
391
- let {dbSchema, hasArrayField} = table->Table.toSqlParams(~schema=itemSchema, ~pgSchema)
380
+ type tableBatchSetQuery = {
381
+ query: string,
382
+ convertOrThrow: array<unknown> => array<unknown>,
383
+ isInsertValues: bool,
384
+ jsonStringifier: option<array<unknown> => array<unknown>>,
385
+ }
386
+
387
+
388
+ // pg driver doesn't auto-serialize JSONB values like postgres.js did.
389
+ // Pre-stringify JSON field values in items before schema processing.
390
+ let makeJsonStringifier = (table: Table.table, jsonFieldIndices: array<int>) => {
391
+ if jsonFieldIndices->Utils.Array.notEmpty {
392
+ let jsonFieldNames = table->Table.getFields->Belt.Array.keepMap(field => {
393
+ switch field.fieldType {
394
+ | Json => Some(field->Table.getDbFieldName)
395
+ | _ => None
396
+ }
397
+ })
398
+ Some(
399
+ (items: array<unknown>) =>
400
+ items->Js.Array2.map(item => {
401
+ let dict = Js.Dict.fromArray(
402
+ Js.Dict.entries(item->(Utils.magic: unknown => dict<unknown>)),
403
+ )
404
+ jsonFieldNames->Js.Array2.forEach(name => {
405
+ switch dict->Js.Dict.get(name) {
406
+ | Some(v) =>
407
+ dict->Js.Dict.set(
408
+ name,
409
+ Js.Json.stringify(v->(Utils.magic: unknown => Js.Json.t))->(
410
+ Utils.magic: string => unknown
411
+ ),
412
+ )
413
+ | None => ()
414
+ }
415
+ })
416
+ dict->(Utils.magic: dict<unknown> => unknown)
417
+ }),
418
+ )
419
+ } else {
420
+ None
421
+ }
422
+ }
423
+
424
+ let makeTableBatchSetQuery = (~pgSchema, ~table: Table.table, ~itemSchema: S.t<'item>): tableBatchSetQuery => {
425
+ let {dbSchema, hasArrayField, jsonFieldIndices} = table->Table.toSqlParams(~schema=itemSchema, ~pgSchema)
426
+ let jsonStringifier = makeJsonStringifier(table, jsonFieldIndices)
392
427
 
393
428
  // Should move this to a better place
394
429
  // We need it for the isRawEvents check in makeTableBatchSet
@@ -411,37 +446,55 @@ let makeTableBatchSetQuery = (~pgSchema, ~table: Table.table, ~itemSchema: S.t<'
411
446
  let typeValidation = false
412
447
 
413
448
  if (isRawEvents || !hasArrayField) && !isHistoryUpdate {
414
- {
415
- "query": makeInsertUnnestSetQuery(~pgSchema, ~table, ~itemSchema, ~isRawEvents),
416
- "convertOrThrow": S.compile(
449
+ let baseConvert =
450
+ S.compile(
417
451
  S.unnest(dbSchema),
418
452
  ~input=Value,
419
453
  ~output=Unknown,
420
454
  ~mode=Sync,
421
455
  ~typeValidation,
422
- ),
423
- "isInsertValues": false,
456
+ )->(Utils.magic: (array<unknown> => unknown) => (array<unknown> => array<unknown>))
457
+ {
458
+ query: makeInsertUnnestSetQuery(~pgSchema, ~table, ~itemSchema, ~isRawEvents),
459
+ convertOrThrow: baseConvert,
460
+ isInsertValues: false,
461
+ jsonStringifier,
424
462
  }
425
463
  } else {
426
464
  {
427
- "query": makeInsertValuesSetQuery(
465
+ query: makeInsertValuesSetQuery(
428
466
  ~pgSchema,
429
467
  ~table,
430
468
  ~itemSchema,
431
469
  ~itemsCount=maxItemsPerQuery,
432
470
  ),
433
- "convertOrThrow": S.compile(
434
- S.unnest(itemSchema)->S.preprocess(_ => {
435
- serializer: Utils.Array.flatten->(
436
- Utils.magic: (array<array<'a>> => array<'a>) => unknown => unknown
437
- ),
438
- }),
439
- ~input=Value,
440
- ~output=Unknown,
441
- ~mode=Sync,
442
- ~typeValidation,
443
- ),
444
- "isInsertValues": true,
471
+ convertOrThrow: (if isHistoryUpdate {
472
+ // Entity history uses S.object with transformation (Change variant → raw dict)
473
+ // that gets lost during dbSchema conversion. Use itemSchema directly.
474
+ // JSON serialization is handled by jsonStringifier in setOrThrow.
475
+ S.compile(
476
+ S.unnest(itemSchema->S.toUnknown)->S.preprocess(_ => {
477
+ serializer: Utils.Array.flatten->(Utils.magic: (array<array<'a>> => array<'a>) => unknown => unknown),
478
+ }),
479
+ ~input=Value,
480
+ ~output=Unknown,
481
+ ~mode=Sync,
482
+ ~typeValidation,
483
+ )->Obj.magic
484
+ } else {
485
+ // Non-history tables with Array fields
486
+ S.compile(
487
+ S.unnest(dbSchema)->S.preprocess(_ => {
488
+ serializer: Utils.Array.flatten->(Utils.magic: (array<array<'a>> => array<'a>) => unknown => unknown),
489
+ }),
490
+ ~input=Value,
491
+ ~output=Unknown,
492
+ ~mode=Sync,
493
+ ~typeValidation,
494
+ )->Obj.magic
495
+ }: array<unknown> => array<unknown>),
496
+ isInsertValues: true,
497
+ jsonStringifier,
445
498
  }
446
499
  }
447
500
  }
@@ -486,7 +539,7 @@ exception PgEncodingError({table: Table.table})
486
539
 
487
540
  // WeakMap for caching table batch set queries
488
541
  let setQueryCache = Utils.WeakMap.make()
489
- let setOrThrow = async (sql, ~items, ~table: Table.table, ~itemSchema, ~pgSchema) => {
542
+ let setOrThrow = async (sql: Pg.sql, ~items, ~table: Table.table, ~itemSchema, ~pgSchema) => {
490
543
  if items->Array.length === 0 {
491
544
  ()
492
545
  } else {
@@ -504,34 +557,40 @@ let setOrThrow = async (sql, ~items, ~table: Table.table, ~itemSchema, ~pgSchema
504
557
  }
505
558
  }
506
559
 
560
+ // Pre-stringify JSON field values for pg driver compatibility
561
+ let prepareItems = (rawItems: array<unknown>): array<unknown> =>
562
+ switch data.jsonStringifier {
563
+ | Some(stringify) => stringify(rawItems)
564
+ | None => rawItems
565
+ }
566
+
507
567
  try {
508
- if data["isInsertValues"] {
568
+ if data.isInsertValues {
509
569
  let chunks = chunkArray(items, ~chunkSize=maxItemsPerQuery)
510
570
  let responses = []
511
571
  chunks->Js.Array2.forEach(chunk => {
512
572
  let chunkSize = chunk->Array.length
513
573
  let isFullChunk = chunkSize === maxItemsPerQuery
514
574
 
515
- let params = data["convertOrThrow"](
516
- chunk->(Utils.magic: array<'item> => array<unknown>),
517
- )
518
- // Use prepared query only for full batches where the cached query is reused.
519
- // Partial chunks generate unique SQL each time, so preparation has no benefit.
520
- let response = isFullChunk
521
- ? sql->Postgres.preparedUnsafe(data["query"], params)
522
- : sql->Postgres.unpreparedUnsafe(
523
- makeInsertValuesSetQuery(~pgSchema, ~table, ~itemSchema, ~itemsCount=chunkSize),
524
- params,
525
- )
575
+ let response = sql.query({
576
+ text: isFullChunk
577
+ ? data.query
578
+ // Create a new query for partial chunks on the fly.
579
+ : makeInsertValuesSetQuery(~pgSchema, ~table, ~itemSchema, ~itemsCount=chunkSize),
580
+ values: data.convertOrThrow(prepareItems(chunk->(Utils.magic: array<'item> => array<unknown>))),
581
+ // Don't prepare partial chunks - they vary in size and would pollute the cache
582
+ name: ?isFullChunk ? Some(`insert_${table.stmtId}`) : None,
583
+ })
526
584
  responses->Js.Array2.push(response)->ignore
527
585
  })
528
586
  let _ = await Promise.all(responses)
529
587
  } else {
530
588
  // Use UNNEST approach for single query
531
- await sql->Postgres.preparedUnsafe(
532
- data["query"],
533
- data["convertOrThrow"](items->(Utils.magic: array<'item> => array<unknown>)),
534
- )
589
+ let _ = await sql.query({
590
+ text: data.query,
591
+ values: data.convertOrThrow(prepareItems(items->(Utils.magic: array<'item> => array<unknown>))),
592
+ name: `upsert_${table.stmtId}`,
593
+ })
535
594
  }
536
595
  } catch {
537
596
  | S.Raised(_) as exn =>
@@ -643,19 +702,21 @@ let getConnectedPsqlExec = {
643
702
  }
644
703
  }
645
704
 
646
- let deleteByIdsOrThrow = async (sql, ~pgSchema, ~ids, ~table: Table.table) => {
705
+ let deleteByIdsOrThrow = async (sql: Pg.sql, ~pgSchema, ~ids, ~table: Table.table) => {
647
706
  switch await (
648
707
  switch ids {
649
708
  | [_] =>
650
- sql->Postgres.preparedUnsafe(
651
- makeDeleteByIdQuery(~pgSchema, ~tableName=table.tableName),
652
- ids->Obj.magic,
653
- )
709
+ sql.query({
710
+ name: `delete_id_${table.stmtId}`,
711
+ text: makeDeleteByIdQuery(~pgSchema, ~tableName=table.tableName),
712
+ values: ids->Obj.magic,
713
+ })
654
714
  | _ =>
655
- sql->Postgres.preparedUnsafe(
656
- makeDeleteByIdsQuery(~pgSchema, ~tableName=table.tableName),
657
- [ids]->Obj.magic,
658
- )
715
+ sql.query({
716
+ name: `delete_ids_${table.stmtId}`,
717
+ text: makeDeleteByIdsQuery(~pgSchema, ~tableName=table.tableName),
718
+ values: [ids]->Obj.magic,
719
+ })
659
720
  }
660
721
  ) {
661
722
  | exception exn =>
@@ -716,9 +777,9 @@ FROM UNNEST($1::text[], $2::${checkpointIdPgType}[]) AS u(${Table.idFieldName},
716
777
  }
717
778
 
718
779
  let executeSet = (
719
- sql: Postgres.sql,
780
+ sql: Pg.sql,
720
781
  ~items: array<'a>,
721
- ~dbFunction: (Postgres.sql, array<'a>) => promise<unit>,
782
+ ~dbFunction: (Pg.sql, array<'a>) => promise<unit>,
722
783
  ) => {
723
784
  if items->Array.length > 0 {
724
785
  sql->dbFunction(items)
@@ -728,7 +789,7 @@ let executeSet = (
728
789
  }
729
790
 
730
791
  let rec writeBatch = async (
731
- sql,
792
+ pool: Pg.pool,
732
793
  ~batch: Batch.t,
733
794
  ~rawEvents,
734
795
  ~pgSchema,
@@ -825,11 +886,11 @@ let rec writeBatch = async (
825
886
 
826
887
  if batchDeleteCheckpointIds->Utils.Array.notEmpty {
827
888
  promises->Belt.Array.push(
828
- sql
829
- ->Postgres.preparedUnsafe(
830
- makeInsertDeleteUpdatesQuery(~entityConfig, ~pgSchema),
831
- (batchDeleteEntityIds, batchDeleteCheckpointIds->BigInt.arrayToStringArray)->Obj.magic,
832
- )
889
+ sql.query({
890
+ name: `insert_deletes_${entityConfig.index->Belt.Int.toString}`,
891
+ text: makeInsertDeleteUpdatesQuery(~entityConfig, ~pgSchema),
892
+ values: (batchDeleteEntityIds, batchDeleteCheckpointIds->BigInt.arrayToStringArray)->Obj.magic,
893
+ })
833
894
  ->Promise.ignoreValue,
834
895
  )
835
896
  }
@@ -881,7 +942,7 @@ let rec writeBatch = async (
881
942
 
882
943
  let _ = await promises->Promise.all
883
944
  } catch {
884
- // There's a race condition that sql->Postgres.beginSql
945
+ // There's a race condition that pool->Pg.beginSql
885
946
  // might throw PG error, earlier, than the handled error
886
947
  // from setOrThrow will be passed through.
887
948
  // This is needed for the utf8 encoding fix.
@@ -918,7 +979,7 @@ let rec writeBatch = async (
918
979
 
919
980
  // Important: Don't rethrow here, since it'll result in
920
981
  // an unhandled rejected promise error.
921
- // That's fine not to throw, since sql->Postgres.beginSql
982
+ // That's fine not to throw, since pool->Pg.beginSql
922
983
  // will fail anyways.
923
984
  }
924
985
  }
@@ -953,7 +1014,7 @@ let rec writeBatch = async (
953
1014
 
954
1015
  try {
955
1016
  let _ = await Promise.all2((
956
- sql->Postgres.beginSql(async sql => {
1017
+ pool->Pg.beginSql(async sql => {
957
1018
  //Rollback tables need to happen first in the traction
958
1019
  switch rollbackTables {
959
1020
  | Some(rollbackTables) =>
@@ -1036,7 +1097,7 @@ let rec writeBatch = async (
1036
1097
  let _ = escapeTables->Utils.Set.add(table)
1037
1098
  // Retry with specifying which tables to escape.
1038
1099
  await writeBatch(
1039
- sql,
1100
+ pool,
1040
1101
  ~escapeTables,
1041
1102
  ~rawEvents,
1042
1103
  ~batch,
@@ -1102,7 +1163,7 @@ AND NOT EXISTS (
1102
1163
  }
1103
1164
 
1104
1165
  let make = (
1105
- ~sql: Postgres.sql,
1166
+ ~pool: Pg.pool,
1106
1167
  ~pgHost,
1107
1168
  ~pgSchema,
1108
1169
  ~pgPort,
@@ -1114,6 +1175,7 @@ let make = (
1114
1175
  ~onInitialize=?,
1115
1176
  ~onNewTables=?,
1116
1177
  ): Persistence.storage => {
1178
+ let sql = pool->Pg.poolToSql
1117
1179
  // Must match PG_CONTAINER in packages/cli/src/docker_env.rs
1118
1180
  let containerName = "envio-postgres"
1119
1181
  let psqlExecOptions: NodeJs.ChildProcess.execOptions = {
@@ -1127,11 +1189,11 @@ let make = (
1127
1189
  ])
1128
1190
 
1129
1191
  let isInitialized = async () => {
1130
- let envioTables = await sql->Postgres.unsafe(
1131
- `SELECT table_schema FROM information_schema.tables WHERE table_schema = '${pgSchema}' AND (table_name = '${// This is for indexer before envio@2.28
1192
+ let {rows: envioTables} = await sql.query({
1193
+ text: `SELECT table_schema FROM information_schema.tables WHERE table_schema = '${pgSchema}' AND (table_name = '${// This is for indexer before envio@2.28
1132
1194
  "event_sync_state"}' OR table_name = '${InternalTable.Chains.table.tableName}');`,
1133
- )
1134
- envioTables->Utils.Array.notEmpty
1195
+ })
1196
+ envioTables->Js.Array2.length > 0
1135
1197
  }
1136
1198
 
1137
1199
  let restoreEffectCache = async (~withUpload) => {
@@ -1156,9 +1218,8 @@ let make = (
1156
1218
  let effectName = entry->Js.String2.slice(~from=0, ~to_=-4) // Remove .tsv extension
1157
1219
  let table = Internal.makeCacheTable(~effectName)
1158
1220
 
1159
- sql
1160
- ->Postgres.unsafe(makeCreateTableQuery(table, ~pgSchema, ~isNumericArrayAsText=false))
1161
- ->Promise.then(() => {
1221
+ sql.query({text: makeCreateTableQuery(table, ~pgSchema, ~isNumericArrayAsText=false)})
1222
+ ->Promise.then(_ => {
1162
1223
  let inputFile = NodeJs.Path.join(cacheDirPath, entry)->NodeJs.Path.toString
1163
1224
 
1164
1225
  let command = `${psqlExec} -c 'COPY "${pgSchema}"."${table.tableName}" FROM STDIN WITH (FORMAT text, HEADER);' < ${inputFile}`
@@ -1194,7 +1255,8 @@ let make = (
1194
1255
  }
1195
1256
 
1196
1257
  let cacheTableInfo: array<schemaCacheTableInfo> =
1197
- await sql->Postgres.unsafe(makeSchemaCacheTableInfoQuery(~pgSchema))
1258
+ (await sql.query({text: makeSchemaCacheTableInfoQuery(~pgSchema)})).rows
1259
+ ->(Utils.magic: array<unknown> => array<schemaCacheTableInfo>)
1198
1260
 
1199
1261
  if withUpload && cacheTableInfo->Utils.Array.notEmpty {
1200
1262
  // Integration with other tools like Hasura
@@ -1219,7 +1281,8 @@ let make = (
1219
1281
 
1220
1282
  let initialize = async (~chainConfigs=[], ~entities=[], ~enums=[]): Persistence.initialState => {
1221
1283
  let schemaTableNames: array<schemaTableName> =
1222
- await sql->Postgres.unsafe(makeSchemaTableNamesQuery(~pgSchema))
1284
+ (await sql.query({text: makeSchemaTableNamesQuery(~pgSchema)})).rows
1285
+ ->(Utils.magic: array<unknown> => array<schemaTableName>)
1223
1286
 
1224
1287
  // The initialization query will completely drop the schema and recreate it from scratch.
1225
1288
  // So we need to check if the schema is not used for anything else than envio.
@@ -1259,10 +1322,10 @@ let make = (
1259
1322
  ~isHasuraEnabled,
1260
1323
  )
1261
1324
  // Execute all queries within a single transaction for integrity
1262
- let _ = await sql->Postgres.beginSql(sql => {
1325
+ let _ = await pool->Pg.beginSql(sql => {
1263
1326
  // Promise.all might be not safe to use here,
1264
1327
  // but it's just how it worked before.
1265
- Promise.all(queries->Js.Array2.map(query => sql->Postgres.unsafe(query)))
1328
+ Promise.all(queries->Js.Array2.map(query => sql.query({text: query})))
1266
1329
  })
1267
1330
 
1268
1331
  let cache = await restoreEffectCache(~withUpload=true)
@@ -1297,15 +1360,17 @@ let make = (
1297
1360
  switch await (
1298
1361
  switch ids {
1299
1362
  | [_] =>
1300
- sql->Postgres.preparedUnsafe(
1301
- makeLoadByIdQuery(~pgSchema, ~tableName=table.tableName),
1302
- ids->Obj.magic,
1303
- )
1363
+ sql.query({
1364
+ name: `load_id_${table.stmtId}`,
1365
+ text: makeLoadByIdQuery(~pgSchema, ~tableName=table.tableName),
1366
+ values: ids->Obj.magic,
1367
+ })
1304
1368
  | _ =>
1305
- sql->Postgres.preparedUnsafe(
1306
- makeLoadByIdsQuery(~pgSchema, ~tableName=table.tableName),
1307
- [ids]->Obj.magic,
1308
- )
1369
+ sql.query({
1370
+ name: `load_ids_${table.stmtId}`,
1371
+ text: makeLoadByIdsQuery(~pgSchema, ~tableName=table.tableName),
1372
+ values: [ids]->Obj.magic,
1373
+ })
1309
1374
  }
1310
1375
  ) {
1311
1376
  | exception exn =>
@@ -1315,7 +1380,7 @@ let make = (
1315
1380
  reason: exn,
1316
1381
  }),
1317
1382
  )
1318
- | rows =>
1383
+ | {rows} =>
1319
1384
  try rows->S.parseOrThrow(rowsSchema) catch {
1320
1385
  | exn =>
1321
1386
  raise(
@@ -1345,15 +1410,16 @@ let make = (
1345
1410
  }),
1346
1411
  )
1347
1412
  }
1348
- switch await sql->Postgres.preparedUnsafe(
1349
- makeLoadByFieldQuery(
1413
+ switch await sql.query({
1414
+ name: `load_field_${table.tableName}_${fieldName}_${(operator :> string)}`,
1415
+ text: makeLoadByFieldQuery(
1350
1416
  ~pgSchema,
1351
1417
  ~tableName=table.tableName,
1352
1418
  ~fieldName,
1353
1419
  ~operator=(operator :> string),
1354
1420
  ),
1355
- params,
1356
- ) {
1421
+ values: params,
1422
+ }) {
1357
1423
  | exception exn =>
1358
1424
  raise(
1359
1425
  Persistence.StorageError({
@@ -1361,7 +1427,7 @@ let make = (
1361
1427
  reason: exn,
1362
1428
  }),
1363
1429
  )
1364
- | rows =>
1430
+ | {rows} =>
1365
1431
  try rows->S.parseOrThrow(rowsSchema) catch {
1366
1432
  | exn =>
1367
1433
  raise(
@@ -1398,9 +1464,9 @@ let make = (
1398
1464
 
1399
1465
  if initialize {
1400
1466
  let _ =
1401
- await sql->Postgres.unsafe(
1402
- makeCreateTableQuery(table, ~pgSchema, ~isNumericArrayAsText=false),
1403
- )
1467
+ await sql.query({
1468
+ text: makeCreateTableQuery(table, ~pgSchema, ~isNumericArrayAsText=false),
1469
+ })
1404
1470
  // Integration with other tools like Hasura
1405
1471
  switch onNewTables {
1406
1472
  | Some(onNewTables) => await onNewTables(~tableNames=[table.tableName])
@@ -1414,8 +1480,8 @@ let make = (
1414
1480
  let dumpEffectCache = async () => {
1415
1481
  try {
1416
1482
  let cacheTableInfo: array<schemaCacheTableInfo> =
1417
- (await sql
1418
- ->Postgres.unsafe(makeSchemaCacheTableInfoQuery(~pgSchema)))
1483
+ ((await sql.query({text: makeSchemaCacheTableInfoQuery(~pgSchema)})).rows
1484
+ ->(Utils.magic: array<unknown> => array<schemaCacheTableInfo>))
1419
1485
  ->Js.Array2.filter(i => i.count > 0)
1420
1486
 
1421
1487
  if cacheTableInfo->Utils.Array.notEmpty {
@@ -1493,15 +1559,15 @@ let make = (
1493
1559
  sourceBlockNumber: rawInitialState.sourceBlockNumber,
1494
1560
  })
1495
1561
  }),
1496
- sql
1497
- ->Postgres.unsafe(InternalTable.Checkpoints.makeCommitedCheckpointIdQuery(~pgSchema))
1498
- ->(Utils.magic: promise<array<unknown>> => promise<array<{"id": string}>>),
1499
- sql
1500
- ->Postgres.unsafe(InternalTable.Checkpoints.makeGetReorgCheckpointsQuery(~pgSchema))
1501
- ->(
1502
- Utils.magic: promise<array<unknown>> => promise<
1503
- array<{"id": string, "chain_id": int, "block_number": int, "block_hash": string}>,
1504
- >
1562
+ sql.query({text: InternalTable.Checkpoints.makeCommitedCheckpointIdQuery(~pgSchema)})
1563
+ ->Promise.thenResolve(r =>
1564
+ r.rows->(Utils.magic: array<unknown> => array<{"id": string}>)
1565
+ ),
1566
+ sql.query({text: InternalTable.Checkpoints.makeGetReorgCheckpointsQuery(~pgSchema)})
1567
+ ->Promise.thenResolve(r =>
1568
+ r.rows->(
1569
+ Utils.magic: array<unknown> => array<{"id": string, "chain_id": int, "block_number": int, "block_hash": string}>
1570
+ )
1505
1571
  ),
1506
1572
  ))
1507
1573
 
@@ -1532,7 +1598,7 @@ let make = (
1532
1598
 
1533
1599
  let reset = async () => {
1534
1600
  let query = `DROP SCHEMA IF EXISTS "${pgSchema}" CASCADE;`
1535
- await sql->Postgres.unsafe(query)->Promise.ignoreValue
1601
+ await sql.query({text: query})->Promise.ignoreValue
1536
1602
  }
1537
1603
 
1538
1604
  let setChainMeta = chainsData =>
@@ -1569,19 +1635,21 @@ let make = (
1569
1635
  ) => {
1570
1636
  await Promise.all2((
1571
1637
  // Get IDs of entities that should be deleted (created after rollback target with no prior history)
1572
- sql
1573
- ->Postgres.preparedUnsafe(
1574
- makeGetRollbackRemovedIdsQuery(~entityConfig, ~pgSchema),
1575
- [rollbackTargetCheckpointId->BigInt.toString]->(Utils.magic: array<string> => unknown),
1576
- )
1577
- ->(Utils.magic: promise<unknown> => promise<array<{"id": string}>>),
1638
+ sql.query({
1639
+ name: `rollback_removed_${entityConfig.index->Belt.Int.toString}`,
1640
+ text: makeGetRollbackRemovedIdsQuery(~entityConfig, ~pgSchema),
1641
+ values: [rollbackTargetCheckpointId->BigInt.toString]->(Utils.magic: array<string> => array<unknown>),
1642
+ })
1643
+ ->Promise.thenResolve(r =>
1644
+ r.rows->(Utils.magic: array<unknown> => array<{"id": string}>)
1645
+ ),
1578
1646
  // Get entities that should be restored to their state at or before rollback target
1579
- sql
1580
- ->Postgres.preparedUnsafe(
1581
- makeGetRollbackRestoredEntitiesQuery(~entityConfig, ~pgSchema),
1582
- [rollbackTargetCheckpointId->BigInt.toString]->(Utils.magic: array<string> => unknown),
1583
- )
1584
- ->(Utils.magic: promise<unknown> => promise<array<unknown>>),
1647
+ sql.query({
1648
+ name: `rollback_restored_${entityConfig.index->Belt.Int.toString}`,
1649
+ text: makeGetRollbackRestoredEntitiesQuery(~entityConfig, ~pgSchema),
1650
+ values: [rollbackTargetCheckpointId->BigInt.toString]->(Utils.magic: array<string> => array<unknown>),
1651
+ })
1652
+ ->Promise.thenResolve(r => r.rows),
1585
1653
  ))
1586
1654
  }
1587
1655
 
@@ -1616,7 +1684,7 @@ let make = (
1616
1684
  }
1617
1685
 
1618
1686
  await writeBatch(
1619
- sql,
1687
+ pool,
1620
1688
  ~batch,
1621
1689
  ~rawEvents,
1622
1690
  ~pgSchema,