envio 2.24.0 → 2.25.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/index.d.ts CHANGED
@@ -105,6 +105,8 @@ export function experimental_createEffect<
     readonly input: IS;
     /** The output schema of the effect. */
    readonly output: OS;
+    /** Whether the effect should be cached. */
+    readonly cache?: boolean;
  },
  handler: (args: EffectArgs<I>) => Promise<R>
): Effect<I, O>;
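Together with the Envio.res change below, this is the user-facing addition of the release: effects can now opt into caching. A minimal sketch of how a handler might pass the new flag, assuming envio re-exports the rescript-schema `S` helpers; the effect name and handler body here are made up for illustration:

```ts
import { experimental_createEffect, S } from "envio";

// Hypothetical effect: resolve a token address to its symbol.
// `cache: true` opts the effect into the new persistent cache;
// per the Envio.res change below, the flag defaults to false.
export const getTokenSymbol = experimental_createEffect(
  {
    name: "getTokenSymbol",
    input: S.string,
    output: S.string,
    cache: true,
  },
  async ({ input }) => {
    // Placeholder body; a real handler would call an RPC or HTTP API.
    return `TOKEN_${input.slice(0, 6)}`;
  }
);
```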
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "envio",
-  "version": "v2.24.0",
+  "version": "v2.25.1",
   "description": "A latency and sync speed optimized, developer friendly blockchain data indexer.",
   "bin": "./bin.js",
   "main": "./index.js",
@@ -25,10 +25,10 @@
   },
   "homepage": "https://envio.dev",
   "optionalDependencies": {
-    "envio-linux-x64": "v2.24.0",
-    "envio-linux-arm64": "v2.24.0",
-    "envio-darwin-x64": "v2.24.0",
-    "envio-darwin-arm64": "v2.24.0"
+    "envio-linux-x64": "v2.25.1",
+    "envio-linux-arm64": "v2.25.1",
+    "envio-darwin-x64": "v2.25.1",
+    "envio-darwin-arm64": "v2.25.1"
   },
   "dependencies": {
     "@envio-dev/hypersync-client": "0.6.5",
package/src/Envio.gen.ts CHANGED
@@ -21,7 +21,9 @@ export type effectOptions<input,output> = {
   /** The input schema of the effect. */
   readonly input: RescriptSchema_S_t<input>;
   /** The output schema of the effect. */
-  readonly output: RescriptSchema_S_t<output>
+  readonly output: RescriptSchema_S_t<output>;
+  /** Whether the effect should be cached. */
+  readonly cache?: boolean
 };
 
 export type effectContext = $$effectContext;
package/src/Envio.res CHANGED
@@ -22,6 +22,8 @@ and effectOptions<'input, 'output> = {
   input: S.t<'input>,
   /** The output schema of the effect. */
   output: S.t<'output>,
+  /** Whether the effect should be cached. */
+  cache?: bool,
 }
 @genType.import(("./Types.ts", "EffectContext"))
 and effectContext = {
@@ -48,5 +50,6 @@ let experimental_createEffect = (
       >
     ),
     callsCount: 0,
+    cache: options.cache->Belt.Option.getWithDefault(false),
  }->(Utils.magic: Internal.effect => effect<'input, 'output>)
}
package/src/Envio.res.js CHANGED
@@ -2,12 +2,14 @@
 'use strict';
 
 var Prometheus = require("./Prometheus.res.js");
+var Belt_Option = require("rescript/lib/js/belt_Option.js");
 
 function experimental_createEffect(options, handler) {
   Prometheus.EffectCallsCount.set(0, options.name);
   return {
     name: options.name,
     handler: handler,
+    cache: Belt_Option.getWithDefault(options.cache, false),
     callsCount: 0
   };
 }
@@ -901,10 +901,7 @@ let queueItemIsInReorgThreshold = (
   if currentBlockHeight === 0 {
     false
   } else {
-    switch queueItem {
-    | Item(_) => queueItem->queueItemBlockNumber > highestBlockBelowThreshold
-    | NoItem(_) => queueItem->queueItemBlockNumber > highestBlockBelowThreshold
-    }
+    queueItem->queueItemBlockNumber > highestBlockBelowThreshold
   }
 }
 
package/src/Internal.res CHANGED
@@ -197,6 +197,7 @@ type effectArgs = {
 type effect = {
   name: string,
   handler: effectArgs => promise<effectOutput>,
+  cache: bool,
   mutable callsCount: int,
 }
 
package/src/Persistence.res CHANGED
@@ -5,6 +5,18 @@
 // Currently there are quite many code spread across
 // DbFunctions, Db, Migrations, InMemoryStore modules which use codegen code directly.
 
+// The type reflects an effect cache table in the db
+// It might be present even if the effect is not used in the application
+type effectCache = {
+  name: string,
+  // Number of rows in the table
+  mutable size: int,
+  // Lazily attached table definition when effect is used in the application
+  mutable table: option<Table.table>,
+}
+
+type operator = [#">" | #"="]
+
 type storage = {
   // Should return true if we already have persisted data
   // and we can skip initialization
@@ -15,9 +27,8 @@ type storage = {
     ~entities: array<Internal.entityConfig>=?,
     ~generalTables: array<Table.table>=?,
     ~enums: array<Internal.enumConfig<Internal.enum>>=?,
-    // If true, the storage should clear existing data
-    ~cleanRun: bool=?,
   ) => promise<unit>,
+  loadEffectCaches: unit => promise<array<effectCache>>,
   @raises("StorageError")
   loadByIdsOrThrow: 'item. (
     ~ids: array<string>,
@@ -25,6 +36,15 @@ type storage = {
     ~rowsSchema: S.t<array<'item>>,
   ) => promise<array<'item>>,
   @raises("StorageError")
+  loadByFieldOrThrow: 'item 'value. (
+    ~fieldName: string,
+    ~fieldSchema: S.t<'value>,
+    ~fieldValue: 'value,
+    ~operator: operator,
+    ~table: Table.table,
+    ~rowsSchema: S.t<array<'item>>,
+  ) => promise<array<'item>>,
+  @raises("StorageError")
   setOrThrow: 'item. (
     ~items: array<'item>,
     ~table: Table.table,
@@ -37,7 +57,7 @@ exception StorageError({message: string, reason: exn})
 type storageStatus =
   | Unknown
   | Initializing(promise<unit>)
-  | Ready({cleanRun: bool})
+  | Ready({cleanRun: bool, effectCaches: dict<effectCache>})
 
 type t = {
   userEntities: array<Internal.entityConfig>,
@@ -79,14 +99,7 @@ let make = (
   }
 }
 
-let init = async (
-  persistence,
-  // There are not much sense in the option,
-  // but this is how the runUpMigration used to work
-  // and we want to keep the upsert behavior without breaking changes.
-  ~skipIsInitializedCheck=false,
-  ~reset=false,
-) => {
+let init = async (persistence, ~reset=false) => {
   try {
     let shouldRun = switch persistence.storageStatus {
     | Unknown => true
@@ -102,20 +115,37 @@ let init = async (
       resolveRef := resolve
     })
     persistence.storageStatus = Initializing(promise)
-    if !(reset || skipIsInitializedCheck) && (await persistence.storage.isInitialized()) {
-      persistence.storageStatus = Ready({cleanRun: false})
-    } else {
+    if reset || !(await persistence.storage.isInitialized()) {
       let _ = await persistence.storage.initialize(
         ~entities=persistence.allEntities,
         ~generalTables=persistence.staticTables,
        ~enums=persistence.allEnums,
-        ~cleanRun=reset || !skipIsInitializedCheck,
      )
-      persistence.storageStatus = Ready({cleanRun: true})
+
+      persistence.storageStatus = Ready({
+        cleanRun: true,
+        effectCaches: Js.Dict.empty(),
+      })
      switch persistence.onStorageInitialize {
      | Some(onStorageInitialize) => await onStorageInitialize()
      | None => ()
      }
+    } else if (
+      // In case of a race condition,
+      // we want to set the initial status to Ready only once.
+      switch persistence.storageStatus {
+      | Initializing(_) => true
+      | _ => false
+      }
+    ) {
+      let effectCaches = Js.Dict.empty()
+      (await persistence.storage.loadEffectCaches())->Js.Array2.forEach(effectCache => {
+        effectCaches->Js.Dict.set(effectCache.name, effectCache)
+      })
+      persistence.storageStatus = Ready({
+        cleanRun: false,
+        effectCaches,
+      })
    }
    resolveRef.contents()
  }
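For readers skimming the ReScript, here is the shape of the new contract restated as an illustrative TypeScript model (not code shipped in the package; `TableDefinition` stands in for `Table.table`):

```ts
// Illustrative model of the updated Persistence types.
type TableDefinition = { tableName: string };

type EffectCache = {
  name: string;
  size: number; // number of rows in the cache table
  table: TableDefinition | undefined; // attached lazily when the effect is used
};

type Operator = ">" | "=";

type StorageStatus =
  | { tag: "Unknown" }
  | { tag: "Initializing"; promise: Promise<void> }
  | {
      tag: "Ready";
      cleanRun: boolean;
      // New: effect cache tables found in the database, keyed by name.
      effectCaches: Record<string, EffectCache>;
    };
```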
package/src/Persistence.res.js CHANGED
@@ -36,8 +36,7 @@ function make(userEntities, dcRegistryEntityConfig, allEnums, staticTables, stor
   };
 }
 
-async function init(persistence, skipIsInitializedCheckOpt, resetOpt) {
-  var skipIsInitializedCheck = skipIsInitializedCheckOpt !== undefined ? skipIsInitializedCheckOpt : false;
+async function init(persistence, resetOpt) {
   var reset = resetOpt !== undefined ? resetOpt : false;
   try {
     var promise = persistence.storageStatus;
@@ -63,22 +62,34 @@ async function init(persistence, skipIsInitializedCheckOpt, resetOpt) {
       TAG: "Initializing",
       _0: promise$1
     };
-    if (!(reset || skipIsInitializedCheck) && await persistence.storage.isInitialized()) {
+    if (reset || !await persistence.storage.isInitialized()) {
+      await persistence.storage.initialize(persistence.allEntities, persistence.staticTables, persistence.allEnums);
       persistence.storageStatus = {
         TAG: "Ready",
-        cleanRun: false
-      };
-    } else {
-      await persistence.storage.initialize(persistence.allEntities, persistence.staticTables, persistence.allEnums, reset || !skipIsInitializedCheck);
-      persistence.storageStatus = {
-        TAG: "Ready",
-        cleanRun: true
+        cleanRun: true,
+        effectCaches: {}
      };
      var onStorageInitialize = persistence.onStorageInitialize;
      if (onStorageInitialize !== undefined) {
        await onStorageInitialize();
      }
      
+    } else {
+      var match = persistence.storageStatus;
+      var tmp;
+      tmp = typeof match !== "object" || match.TAG !== "Initializing" ? false : true;
+      if (tmp) {
+        var effectCaches = {};
+        (await persistence.storage.loadEffectCaches()).forEach(function (effectCache) {
+          effectCaches[effectCache.name] = effectCache;
+        });
+        persistence.storageStatus = {
+          TAG: "Ready",
+          cleanRun: false,
+          effectCaches: effectCaches
+        };
+      }
+
    }
    return resolveRef.contents();
  }
package/src/PgStorage.res CHANGED
@@ -1,16 +1,16 @@
-let makeCreateIndexSql = (~tableName, ~indexFields, ~pgSchema) => {
+let makeCreateIndexQuery = (~tableName, ~indexFields, ~pgSchema) => {
   let indexName = tableName ++ "_" ++ indexFields->Js.Array2.joinWith("_")
   let index = indexFields->Belt.Array.map(idx => `"${idx}"`)->Js.Array2.joinWith(", ")
   `CREATE INDEX IF NOT EXISTS "${indexName}" ON "${pgSchema}"."${tableName}"(${index});`
 }
 
-let makeCreateTableIndicesSql = (table: Table.table, ~pgSchema) => {
+let makeCreateTableIndicesQuery = (table: Table.table, ~pgSchema) => {
   open Belt
   let tableName = table.tableName
   let createIndex = indexField =>
-    makeCreateIndexSql(~tableName, ~indexFields=[indexField], ~pgSchema)
+    makeCreateIndexQuery(~tableName, ~indexFields=[indexField], ~pgSchema)
   let createCompositeIndex = indexFields => {
-    makeCreateIndexSql(~tableName, ~indexFields, ~pgSchema)
+    makeCreateIndexQuery(~tableName, ~indexFields, ~pgSchema)
   }
 
   let singleIndices = table->Table.getSingleIndices
@@ -20,7 +20,7 @@ let makeCreateTableIndicesSql = (table: Table.table, ~pgSchema) => {
   compositeIndices->Array.map(createCompositeIndex)->Js.Array2.joinWith("\n")
 }
 
-let makeCreateTableSql = (table: Table.table, ~pgSchema) => {
+let makeCreateTableQuery = (table: Table.table, ~pgSchema) => {
   open Belt
   let fieldsMapped =
     table
@@ -58,7 +58,6 @@ let makeInitializeTransaction = (
   ~generalTables=[],
   ~entities=[],
   ~enums=[],
-  ~cleanRun=false,
 ) => {
   let allTables = generalTables->Array.copy
   let allEntityTables = []
@@ -70,13 +69,9 @@ let makeInitializeTransaction = (
   let derivedSchema = Schema.make(allEntityTables)
 
   let query = ref(
-    (
-      cleanRun
-        ? `DROP SCHEMA IF EXISTS "${pgSchema}" CASCADE;
-      CREATE SCHEMA "${pgSchema}";`
-        : `CREATE SCHEMA IF NOT EXISTS "${pgSchema}";`
-    ) ++
-    `GRANT ALL ON SCHEMA "${pgSchema}" TO "${pgUser}";
+    `DROP SCHEMA IF EXISTS "${pgSchema}" CASCADE;
+CREATE SCHEMA "${pgSchema}";
+GRANT ALL ON SCHEMA "${pgSchema}" TO "${pgUser}";
 GRANT ALL ON SCHEMA "${pgSchema}" TO public;`,
   )
 
@@ -87,31 +82,17 @@ GRANT ALL ON SCHEMA "${pgSchema}" TO public;`,
       ->Js.Array2.map(v => `'${v->(Utils.magic: Internal.enum => string)}'`)
       ->Js.Array2.joinWith(", ")});`
 
-    query :=
-      query.contents ++
-      "\n" ++ if cleanRun {
-        // Direct creation when cleanRunting (faster)
-        enumCreateQuery
-      } else {
-        // Wrap with conditional check only when not cleanRunting
-        `IF NOT EXISTS (
-          SELECT 1 FROM pg_type
-          WHERE typname = '${enumConfig.name->Js.String2.toLowerCase}'
-          AND typnamespace = (SELECT oid FROM pg_namespace WHERE nspname = '${pgSchema}')
-        ) THEN
-          ${enumCreateQuery}
-        END IF;`
-      }
+    query := query.contents ++ "\n" ++ enumCreateQuery
   })
 
   // Batch all table creation first (optimal for PostgreSQL)
   allTables->Js.Array2.forEach((table: Table.table) => {
-    query := query.contents ++ "\n" ++ makeCreateTableSql(table, ~pgSchema)
+    query := query.contents ++ "\n" ++ makeCreateTableQuery(table, ~pgSchema)
   })
 
   // Then batch all indices (better performance when tables exist)
   allTables->Js.Array2.forEach((table: Table.table) => {
-    let indices = makeCreateTableIndicesSql(table, ~pgSchema)
+    let indices = makeCreateTableIndicesQuery(table, ~pgSchema)
     if indices !== "" {
       query := query.contents ++ "\n" ++ indices
     }
@@ -131,7 +112,7 @@ END IF;`
     query :=
       query.contents ++
       "\n" ++
-      makeCreateIndexSql(
+      makeCreateIndexQuery(
        ~tableName=derivedFromField.derivedFromEntity,
        ~indexFields=[indexField],
        ~pgSchema,
@@ -139,28 +120,26 @@ END IF;`
      )
  })
 
-  [
-    // Return optimized queries - main DDL in DO block, functions separate
-    // Note: DO $$ BEGIN wrapper is only needed for PL/pgSQL conditionals (IF NOT EXISTS)
-    // Reset case uses direct DDL (faster), non-cleanRun case uses conditionals (safer)
-    cleanRun || enums->Utils.Array.isEmpty
-      ? query.contents
-      : `DO $$ BEGIN ${query.contents} END $$;`,
-    // Functions query (separate as they can't be in DO block)
-  ]->Js.Array2.concat(functionsQuery.contents !== "" ? [functionsQuery.contents] : [])
+  [query.contents]->Js.Array2.concat(
+    functionsQuery.contents !== "" ? [functionsQuery.contents] : [],
+  )
 }
 
-let makeLoadByIdSql = (~pgSchema, ~tableName) => {
+let makeLoadByIdQuery = (~pgSchema, ~tableName) => {
   `SELECT * FROM "${pgSchema}"."${tableName}" WHERE id = $1 LIMIT 1;`
 }
 
-let makeLoadByIdsSql = (~pgSchema, ~tableName) => {
+let makeLoadByFieldQuery = (~pgSchema, ~tableName, ~fieldName, ~operator) => {
+  `SELECT * FROM "${pgSchema}"."${tableName}" WHERE "${fieldName}" ${operator} $1;`
+}
+
+let makeLoadByIdsQuery = (~pgSchema, ~tableName) => {
   `SELECT * FROM "${pgSchema}"."${tableName}" WHERE id = ANY($1::text[]);`
 }
 
-let makeInsertUnnestSetSql = (~pgSchema, ~table: Table.table, ~itemSchema, ~isRawEvents) => {
+let makeInsertUnnestSetQuery = (~pgSchema, ~table: Table.table, ~itemSchema, ~isRawEvents) => {
   let {quotedFieldNames, quotedNonPrimaryFieldNames, arrayFieldTypes} =
-    table->Table.toSqlParams(~schema=itemSchema)
+    table->Table.toSqlParams(~schema=itemSchema, ~pgSchema)
 
   let primaryKeyFieldNames = Table.getPrimaryKeyFieldNames(table)
 
@@ -188,8 +167,9 @@ SELECT * FROM unnest(${arrayFieldTypes
   } ++ ";"
 }
 
-let makeInsertValuesSetSql = (~pgSchema, ~table: Table.table, ~itemSchema, ~itemsCount) => {
-  let {quotedFieldNames, quotedNonPrimaryFieldNames} = table->Table.toSqlParams(~schema=itemSchema)
+let makeInsertValuesSetQuery = (~pgSchema, ~table: Table.table, ~itemSchema, ~itemsCount) => {
+  let {quotedFieldNames, quotedNonPrimaryFieldNames} =
+    table->Table.toSqlParams(~schema=itemSchema, ~pgSchema)
 
   let primaryKeyFieldNames = Table.getPrimaryKeyFieldNames(table)
   let fieldsCount = quotedFieldNames->Array.length
@@ -236,12 +216,13 @@ VALUES${placeholders.contents}` ++
   // they are always guaranteed to be an object.
   // FIXME what about Fuel params?
 let rawEventsTableName = "raw_events"
+let eventSyncStateTableName = "event_sync_state"
 
 // Constants for chunking
 let maxItemsPerQuery = 500
 
 let makeTableBatchSetQuery = (~pgSchema, ~table: Table.table, ~itemSchema: S.t<'item>) => {
-  let {dbSchema, hasArrayField} = table->Table.toSqlParams(~schema=itemSchema)
+  let {dbSchema, hasArrayField} = table->Table.toSqlParams(~schema=itemSchema, ~pgSchema)
   let isRawEvents = table.tableName === rawEventsTableName
 
   // Should experiment how much it'll affect performance
@@ -253,7 +234,7 @@ let makeTableBatchSetQuery = (~pgSchema, ~table: Table.table, ~itemSchema: S.t<'
   if isRawEvents || !hasArrayField {
     {
-      "sql": makeInsertUnnestSetSql(~pgSchema, ~table, ~itemSchema, ~isRawEvents),
+      "query": makeInsertUnnestSetQuery(~pgSchema, ~table, ~itemSchema, ~isRawEvents),
       "convertOrThrow": S.compile(
         S.unnest(dbSchema),
         ~input=Value,
@@ -265,7 +246,12 @@ let makeTableBatchSetQuery = (~pgSchema, ~table: Table.table, ~itemSchema: S.t<'
     }
   } else {
     {
-      "sql": makeInsertValuesSetSql(~pgSchema, ~table, ~itemSchema, ~itemsCount=maxItemsPerQuery),
+      "query": makeInsertValuesSetQuery(
+        ~pgSchema,
+        ~table,
+        ~itemSchema,
+        ~itemsCount=maxItemsPerQuery,
+      ),
       "convertOrThrow": S.compile(
         S.unnest(itemSchema)->S.preprocess(_ => {
           serializer: Utils.Array.flatten->Utils.magic,
@@ -291,6 +277,35 @@ let chunkArray = (arr: array<'a>, ~chunkSize) => {
   chunks
 }
 
+let removeInvalidUtf8InPlace = entities =>
+  entities->Js.Array2.forEach(item => {
+    let dict = item->(Utils.magic: 'a => dict<unknown>)
+    dict->Utils.Dict.forEachWithKey((key, value) => {
+      if value->Js.typeof === "string" {
+        let value = value->(Utils.magic: unknown => string)
+        // We mutate here, since we don't care
+        // about the original value with \x00 anyways.
+        //
+        // This is unsafe, but we rely that it'll use
+        // the mutated reference on retry.
+        // TODO: Test it properly after we start using
+        // in-memory PGLite for indexer test framework.
+        dict->Js.Dict.set(
+          key,
+          value
+          ->Utils.String.replaceAll("\x00", "")
+          ->(Utils.magic: string => unknown),
+        )
+      }
+    })
+  })
+
+let pgEncodingErrorSchema = S.object(s =>
+  s.tag("message", `invalid byte sequence for encoding "UTF8": 0x00`)
+)
+
+exception PgEncodingError({table: Table.table})
+
 // WeakMap for caching table batch set queries
 let setQueryCache = Utils.WeakMap.make()
 let setOrThrow = async (sql, ~items, ~table: Table.table, ~itemSchema, ~pgSchema) => {
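The new helper exists because PostgreSQL's UTF8 encoding rejects NUL bytes; a single `0x00` in any string value fails the whole insert with exactly the message matched by `pgEncodingErrorSchema` above. The same idea as a standalone sketch in TypeScript:

```ts
// Sketch of the sanitization in plain TypeScript: PostgreSQL's UTF8
// encoding rejects NUL bytes, so string fields are scrubbed in place
// before the batch is retried.
function removeInvalidUtf8InPlace(rows: Array<Record<string, unknown>>): void {
  for (const row of rows) {
    for (const [key, value] of Object.entries(row)) {
      if (typeof value === "string") {
        row[key] = value.replaceAll("\x00", "");
      }
    }
  }
}
```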
@@ -298,7 +313,7 @@ let setOrThrow = async (sql, ~items, ~table: Table.table, ~itemSchema, ~pgSchema
     ()
   } else {
     // Get or create cached query for this table
-    let query = switch setQueryCache->Utils.WeakMap.get(table) {
+    let data = switch setQueryCache->Utils.WeakMap.get(table) {
     | Some(cached) => cached
     | None => {
         let newQuery = makeTableBatchSetQuery(
@@ -311,10 +326,8 @@ let setOrThrow = async (sql, ~items, ~table: Table.table, ~itemSchema, ~pgSchema
       }
     }
 
-    let sqlQuery = query["sql"]
-
     try {
-      if query["isInsertValues"] {
+      if data["isInsertValues"] {
         let chunks = chunkArray(items, ~chunkSize=maxItemsPerQuery)
         let responses = []
         chunks->Js.Array2.forEach(chunk => {
@@ -325,9 +338,9 @@ let setOrThrow = async (sql, ~items, ~table: Table.table, ~itemSchema, ~pgSchema
           // Either use the sql query for full chunks from cache
           // or create a new one for partial chunks on the fly.
           isFullChunk
-            ? sqlQuery
-            : makeInsertValuesSetSql(~pgSchema, ~table, ~itemSchema, ~itemsCount=chunkSize),
-          query["convertOrThrow"](chunk->(Utils.magic: array<'item> => array<unknown>)),
+            ? data["query"]
+            : makeInsertValuesSetQuery(~pgSchema, ~table, ~itemSchema, ~itemsCount=chunkSize),
+          data["convertOrThrow"](chunk->(Utils.magic: array<'item> => array<unknown>)),
         )
         responses->Js.Array2.push(response)->ignore
       })
@@ -335,8 +348,8 @@ let setOrThrow = async (sql, ~items, ~table: Table.table, ~itemSchema, ~pgSchema
     } else {
       // Use UNNEST approach for single query
       await sql->Postgres.preparedUnsafe(
-        sqlQuery,
-        query["convertOrThrow"](items->(Utils.magic: array<'item> => array<unknown>)),
+        data["query"],
+        data["convertOrThrow"](items->(Utils.magic: array<'item> => array<unknown>)),
       )
     }
   } catch {
@@ -358,41 +371,112 @@ let setOrThrow = async (sql, ~items, ~table: Table.table, ~itemSchema, ~pgSchema
   }
 }
 
+let setEntityHistoryOrThrow = (
+  sql,
+  ~entityHistory: EntityHistory.t<'entity>,
+  ~rows: array<EntityHistory.historyRow<'entity>>,
+  ~shouldCopyCurrentEntity=?,
+  ~shouldRemoveInvalidUtf8=false,
+) => {
+  rows
+  ->Belt.Array.map(historyRow => {
+    let row = historyRow->S.reverseConvertToJsonOrThrow(entityHistory.schema)
+    if shouldRemoveInvalidUtf8 {
+      [row]->removeInvalidUtf8InPlace
+    }
+    entityHistory.insertFn(
+      sql,
+      row,
+      ~shouldCopyCurrentEntity=switch shouldCopyCurrentEntity {
+      | Some(v) => v
+      | None => {
+          let containsRollbackDiffChange =
+            historyRow.containsRollbackDiffChange->Belt.Option.getWithDefault(false)
+          !containsRollbackDiffChange
+        }
+      },
+    )
+  })
+  ->Promise.all
+  ->(Utils.magic: promise<array<unit>> => promise<unit>)
+}
+
+type schemaTableName = {
+  @as("table_name")
+  tableName: string,
+}
+
+let makeSchemaTableNamesQuery = (~pgSchema) => {
+  `SELECT table_name FROM information_schema.tables WHERE table_schema = '${pgSchema}';`
+}
+
 let make = (~sql: Postgres.sql, ~pgSchema, ~pgUser): Persistence.storage => {
   let isInitialized = async () => {
-    let schemas =
+    let envioTables =
       await sql->Postgres.unsafe(
-        `SELECT schema_name FROM information_schema.schemata WHERE schema_name = '${pgSchema}';`,
+        `SELECT table_schema FROM information_schema.tables WHERE table_schema = '${pgSchema}' AND table_name = '${eventSyncStateTableName}';`,
      )
-    schemas->Utils.Array.notEmpty
+    envioTables->Utils.Array.notEmpty
  }
 
-  let initialize = async (~entities=[], ~generalTables=[], ~enums=[], ~cleanRun=false) => {
-    let queries = makeInitializeTransaction(
-      ~pgSchema,
-      ~pgUser,
-      ~generalTables,
-      ~entities,
-      ~enums,
-      ~cleanRun,
-    )
+  let initialize = async (~entities=[], ~generalTables=[], ~enums=[]) => {
+    let schemaTableNames: array<schemaTableName> =
+      await sql->Postgres.unsafe(makeSchemaTableNamesQuery(~pgSchema))
+
+    // The initialization query will completely drop the schema and recreate it from scratch.
+    // So we need to check if the schema is not used for anything else than envio.
+    if (
+      // Should pass with existing schema with no tables
+      // This might happen when used with public schema
+      // which is automatically created by postgres.
+      schemaTableNames->Utils.Array.notEmpty &&
+      // Otherwise should throw if there's a table, but no envio specific one
+      // This means that the schema is used for something else than envio.
+      !(schemaTableNames->Js.Array2.some(table => table.tableName === eventSyncStateTableName))
+    ) {
+      Js.Exn.raiseError(
+        `Cannot run Envio migrations on PostgreSQL schema "${pgSchema}" because it contains non-Envio tables. Running migrations would delete all data in this schema.\n\nTo resolve this:\n1. If you want to use this schema, first backup any important data, then drop it with: "pnpm envio local db-migrate down"\n2. Or specify a different schema name by setting the "ENVIO_PG_PUBLIC_SCHEMA" environment variable\n3. Or manually drop the schema in your database if you're certain the data is not needed.`,
+      )
+    }
+
+    let queries = makeInitializeTransaction(~pgSchema, ~pgUser, ~generalTables, ~entities, ~enums)
     // Execute all queries within a single transaction for integrity
     let _ = await sql->Postgres.beginSql(sql => {
       queries->Js.Array2.map(query => sql->Postgres.unsafe(query))
     })
   }
 
+  let loadEffectCaches = async () => {
+    let schemaTableNames: array<schemaTableName> =
+      await sql->Postgres.unsafe(makeSchemaTableNamesQuery(~pgSchema))
+    schemaTableNames->Belt.Array.keepMapU(schemaTableName => {
+      if schemaTableName.tableName->Js.String2.startsWith("effect_cache_") {
+        Some(
+          (
+            {
+              name: schemaTableName.tableName,
+              size: 0,
+              table: None,
+            }: Persistence.effectCache
+          ),
+        )
+      } else {
+        None
+      }
+    })
+  }
+
   let loadByIdsOrThrow = async (~ids, ~table: Table.table, ~rowsSchema) => {
     switch await (
       switch ids {
       | [_] =>
         sql->Postgres.preparedUnsafe(
-          makeLoadByIdSql(~pgSchema, ~tableName=table.tableName),
+          makeLoadByIdQuery(~pgSchema, ~tableName=table.tableName),
           ids->Obj.magic,
         )
       | _ =>
         sql->Postgres.preparedUnsafe(
-          makeLoadByIdsSql(~pgSchema, ~tableName=table.tableName),
+          makeLoadByIdsQuery(~pgSchema, ~tableName=table.tableName),
          [ids]->Obj.magic,
        )
      }
@@ -417,6 +501,52 @@ let make = (~sql: Postgres.sql, ~pgSchema, ~pgUser): Persistence.storage => {
    }
  }
 
+  let loadByFieldOrThrow = async (
+    ~fieldName: string,
+    ~fieldSchema,
+    ~fieldValue,
+    ~operator: Persistence.operator,
+    ~table: Table.table,
+    ~rowsSchema,
+  ) => {
+    let params = try [fieldValue->S.reverseConvertToJsonOrThrow(fieldSchema)]->Obj.magic catch {
+    | exn =>
+      raise(
+        Persistence.StorageError({
+          message: `Failed loading "${table.tableName}" from storage by field "${fieldName}". Couldn't serialize provided value.`,
+          reason: exn,
+        }),
+      )
+    }
+    switch await sql->Postgres.preparedUnsafe(
+      makeLoadByFieldQuery(
+        ~pgSchema,
+        ~tableName=table.tableName,
+        ~fieldName,
+        ~operator=(operator :> string),
+      ),
+      params,
+    ) {
+    | exception exn =>
+      raise(
+        Persistence.StorageError({
+          message: `Failed loading "${table.tableName}" from storage by field "${fieldName}"`,
+          reason: exn,
+        }),
+      )
+    | rows =>
+      try rows->S.parseOrThrow(rowsSchema) catch {
+      | exn =>
+        raise(
+          Persistence.StorageError({
+            message: `Failed to parse "${table.tableName}" loaded from storage by ids`,
+            reason: exn,
+          }),
+        )
+      }
+    }
+  }
+
   let setOrThrow = (
     type item,
     ~items: array<item>,
@@ -435,6 +565,8 @@ let make = (~sql: Postgres.sql, ~pgSchema, ~pgUser): Persistence.storage => {
   {
     isInitialized,
     initialize,
+    loadByFieldOrThrow,
+    loadEffectCaches,
    loadByIdsOrThrow,
    setOrThrow,
  }
package/src/PgStorage.res.js CHANGED
@@ -4,14 +4,17 @@
 var $$Array = require("rescript/lib/js/array.js");
 var Table = require("./db/Table.res.js");
 var Utils = require("./Utils.res.js");
+var Js_exn = require("rescript/lib/js/js_exn.js");
 var Schema = require("./db/Schema.res.js");
 var Belt_Array = require("rescript/lib/js/belt_Array.js");
+var Belt_Option = require("rescript/lib/js/belt_Option.js");
 var Caml_option = require("rescript/lib/js/caml_option.js");
 var Persistence = require("./Persistence.res.js");
+var Caml_exceptions = require("rescript/lib/js/caml_exceptions.js");
 var S$RescriptSchema = require("rescript-schema/src/S.res.js");
 var Caml_js_exceptions = require("rescript/lib/js/caml_js_exceptions.js");
 
-function makeCreateIndexSql(tableName, indexFields, pgSchema) {
+function makeCreateIndexQuery(tableName, indexFields, pgSchema) {
   var indexName = tableName + "_" + indexFields.join("_");
   var index = Belt_Array.map(indexFields, (function (idx) {
     return "\"" + idx + "\"";
@@ -19,20 +22,20 @@ function makeCreateIndexSql(tableName, indexFields, pgSchema) {
   return "CREATE INDEX IF NOT EXISTS \"" + indexName + "\" ON \"" + pgSchema + "\".\"" + tableName + "\"(" + index + ");";
 }
 
-function makeCreateTableIndicesSql(table, pgSchema) {
+function makeCreateTableIndicesQuery(table, pgSchema) {
   var tableName = table.tableName;
   var createIndex = function (indexField) {
-    return makeCreateIndexSql(tableName, [indexField], pgSchema);
+    return makeCreateIndexQuery(tableName, [indexField], pgSchema);
   };
   var createCompositeIndex = function (indexFields) {
-    return makeCreateIndexSql(tableName, indexFields, pgSchema);
+    return makeCreateIndexQuery(tableName, indexFields, pgSchema);
   };
   var singleIndices = Table.getSingleIndices(table);
   var compositeIndices = Table.getCompositeIndices(table);
   return Belt_Array.map(singleIndices, createIndex).join("\n") + Belt_Array.map(compositeIndices, createCompositeIndex).join("\n");
 }
 
-function makeCreateTableSql(table, pgSchema) {
+function makeCreateTableQuery(table, pgSchema) {
   var fieldsMapped = Belt_Array.map(Table.getFields(table), (function (field) {
     var defaultValue = field.defaultValue;
     var fieldType = field.fieldType;
@@ -56,11 +59,10 @@ function makeCreateTableSql(table, pgSchema) {
   ) + ");";
 }
 
-function makeInitializeTransaction(pgSchema, pgUser, generalTablesOpt, entitiesOpt, enumsOpt, cleanRunOpt) {
+function makeInitializeTransaction(pgSchema, pgUser, generalTablesOpt, entitiesOpt, enumsOpt) {
   var generalTables = generalTablesOpt !== undefined ? generalTablesOpt : [];
   var entities = entitiesOpt !== undefined ? entitiesOpt : [];
   var enums = enumsOpt !== undefined ? enumsOpt : [];
-  var cleanRun = cleanRunOpt !== undefined ? cleanRunOpt : false;
   var allTables = $$Array.copy(generalTables);
   var allEntityTables = [];
   entities.forEach(function (entity) {
@@ -70,23 +72,19 @@ function makeInitializeTransaction(pgSchema, pgUser, generalTablesOpt, entitiesO
   });
   var derivedSchema = Schema.make(allEntityTables);
   var query = {
-    contents: (
-      cleanRun ? "DROP SCHEMA IF EXISTS \"" + pgSchema + "\" CASCADE;\nCREATE SCHEMA \"" + pgSchema + "\";" : "CREATE SCHEMA IF NOT EXISTS \"" + pgSchema + "\";"
-    ) + ("GRANT ALL ON SCHEMA \"" + pgSchema + "\" TO \"" + pgUser + "\";\nGRANT ALL ON SCHEMA \"" + pgSchema + "\" TO public;")
+    contents: "DROP SCHEMA IF EXISTS \"" + pgSchema + "\" CASCADE;\nCREATE SCHEMA \"" + pgSchema + "\";\nGRANT ALL ON SCHEMA \"" + pgSchema + "\" TO \"" + pgUser + "\";\nGRANT ALL ON SCHEMA \"" + pgSchema + "\" TO public;"
   };
   enums.forEach(function (enumConfig) {
     var enumCreateQuery = "CREATE TYPE \"" + pgSchema + "\"." + enumConfig.name + " AS ENUM(" + enumConfig.variants.map(function (v) {
       return "'" + v + "'";
     }).join(", ") + ");";
-    query.contents = query.contents + "\n" + (
-      cleanRun ? enumCreateQuery : "IF NOT EXISTS (\n  SELECT 1 FROM pg_type \n  WHERE typname = '" + enumConfig.name.toLowerCase() + "' \n  AND typnamespace = (SELECT oid FROM pg_namespace WHERE nspname = '" + pgSchema + "')\n) THEN \n  " + enumCreateQuery + "\nEND IF;"
-    );
+    query.contents = query.contents + "\n" + enumCreateQuery;
   });
   allTables.forEach(function (table) {
-    query.contents = query.contents + "\n" + makeCreateTableSql(table, pgSchema);
+    query.contents = query.contents + "\n" + makeCreateTableQuery(table, pgSchema);
   });
   allTables.forEach(function (table) {
-    var indices = makeCreateTableIndicesSql(table, pgSchema);
+    var indices = makeCreateTableIndicesQuery(table, pgSchema);
     if (indices !== "") {
       query.contents = query.contents + "\n" + indices;
       return ;
@@ -100,22 +98,26 @@ function makeInitializeTransaction(pgSchema, pgUser, generalTablesOpt, entitiesO
     functionsQuery.contents = functionsQuery.contents + "\n" + entity.entityHistory.createInsertFnQuery;
     Table.getDerivedFromFields(entity.table).forEach(function (derivedFromField) {
       var indexField = Utils.unwrapResultExn(Schema.getDerivedFromFieldName(derivedSchema, derivedFromField));
-      query.contents = query.contents + "\n" + makeCreateIndexSql(derivedFromField.derivedFromEntity, [indexField], pgSchema);
+      query.contents = query.contents + "\n" + makeCreateIndexQuery(derivedFromField.derivedFromEntity, [indexField], pgSchema);
     });
   });
-  return [cleanRun || Utils.$$Array.isEmpty(enums) ? query.contents : "DO $$ BEGIN " + query.contents + " END $$;"].concat(functionsQuery.contents !== "" ? [functionsQuery.contents] : []);
+  return [query.contents].concat(functionsQuery.contents !== "" ? [functionsQuery.contents] : []);
 }
 
-function makeLoadByIdSql(pgSchema, tableName) {
+function makeLoadByIdQuery(pgSchema, tableName) {
   return "SELECT * FROM \"" + pgSchema + "\".\"" + tableName + "\" WHERE id = $1 LIMIT 1;";
 }
 
-function makeLoadByIdsSql(pgSchema, tableName) {
+function makeLoadByFieldQuery(pgSchema, tableName, fieldName, operator) {
+  return "SELECT * FROM \"" + pgSchema + "\".\"" + tableName + "\" WHERE \"" + fieldName + "\" " + operator + " $1;";
+}
+
+function makeLoadByIdsQuery(pgSchema, tableName) {
   return "SELECT * FROM \"" + pgSchema + "\".\"" + tableName + "\" WHERE id = ANY($1::text[]);";
 }
 
-function makeInsertUnnestSetSql(pgSchema, table, itemSchema, isRawEvents) {
-  var match = Table.toSqlParams(table, itemSchema);
+function makeInsertUnnestSetQuery(pgSchema, table, itemSchema, isRawEvents) {
+  var match = Table.toSqlParams(table, itemSchema, pgSchema);
   var quotedNonPrimaryFieldNames = match.quotedNonPrimaryFieldNames;
   var primaryKeyFieldNames = Table.getPrimaryKeyFieldNames(table);
   return "INSERT INTO \"" + pgSchema + "\".\"" + table.tableName + "\" (" + match.quotedFieldNames.join(", ") + ")\nSELECT * FROM unnest(" + match.arrayFieldTypes.map(function (arrayFieldType, idx) {
@@ -131,8 +133,8 @@ function makeInsertUnnestSetSql(pgSchema, table, itemSchema, isRawEvents) {
   ) + ";";
 }
 
-function makeInsertValuesSetSql(pgSchema, table, itemSchema, itemsCount) {
-  var match = Table.toSqlParams(table, itemSchema);
+function makeInsertValuesSetQuery(pgSchema, table, itemSchema, itemsCount) {
+  var match = Table.toSqlParams(table, itemSchema, pgSchema);
   var quotedNonPrimaryFieldNames = match.quotedNonPrimaryFieldNames;
   var quotedFieldNames = match.quotedFieldNames;
   var primaryKeyFieldNames = Table.getPrimaryKeyFieldNames(table);
@@ -164,18 +166,20 @@ function makeInsertValuesSetSql(pgSchema, table, itemSchema, itemsCount) {
 
 var rawEventsTableName = "raw_events";
 
+var eventSyncStateTableName = "event_sync_state";
+
 function makeTableBatchSetQuery(pgSchema, table, itemSchema) {
-  var match = Table.toSqlParams(table, itemSchema);
+  var match = Table.toSqlParams(table, itemSchema, pgSchema);
   var isRawEvents = table.tableName === rawEventsTableName;
   if (isRawEvents || !match.hasArrayField) {
     return {
-      sql: makeInsertUnnestSetSql(pgSchema, table, itemSchema, isRawEvents),
+      query: makeInsertUnnestSetQuery(pgSchema, table, itemSchema, isRawEvents),
       convertOrThrow: S$RescriptSchema.compile(S$RescriptSchema.unnest(match.dbSchema), "Output", "Input", "Sync", false),
       isInsertValues: false
     };
   } else {
     return {
-      sql: makeInsertValuesSetSql(pgSchema, table, itemSchema, 500),
+      query: makeInsertValuesSetQuery(pgSchema, table, itemSchema, 500),
       convertOrThrow: S$RescriptSchema.compile(S$RescriptSchema.preprocess(S$RescriptSchema.unnest(itemSchema), (function (param) {
         return {
           s: (function (prim) {
@@ -199,6 +203,24 @@ function chunkArray(arr, chunkSize) {
   return chunks;
 }
 
+function removeInvalidUtf8InPlace(entities) {
+  entities.forEach(function (item) {
+    Utils.Dict.forEachWithKey(item, (function (key, value) {
+      if (typeof value === "string") {
+        item[key] = value.replaceAll("\x00", "");
+        return ;
+      }
+
+    }));
+  });
+}
+
+var pgEncodingErrorSchema = S$RescriptSchema.object(function (s) {
+  s.tag("message", "invalid byte sequence for encoding \"UTF8\": 0x00");
+});
+
+var PgEncodingError = /* @__PURE__ */Caml_exceptions.create("PgStorage.PgEncodingError");
+
 var setQueryCache = new WeakMap();
 
 async function setOrThrow(sql, items, table, itemSchema, pgSchema) {
@@ -206,25 +228,24 @@ async function setOrThrow(sql, items, table, itemSchema, pgSchema) {
     return ;
   }
   var cached = setQueryCache.get(table);
-  var query;
+  var data;
   if (cached !== undefined) {
-    query = Caml_option.valFromOption(cached);
+    data = Caml_option.valFromOption(cached);
   } else {
     var newQuery = makeTableBatchSetQuery(pgSchema, table, itemSchema);
     setQueryCache.set(table, newQuery);
-    query = newQuery;
+    data = newQuery;
   }
-  var sqlQuery = query.sql;
   try {
-    if (!query.isInsertValues) {
-      return await sql.unsafe(sqlQuery, query.convertOrThrow(items), {prepare: true});
+    if (!data.isInsertValues) {
+      return await sql.unsafe(data.query, data.convertOrThrow(items), {prepare: true});
     }
     var chunks = chunkArray(items, 500);
    var responses = [];
    chunks.forEach(function (chunk) {
      var chunkSize = chunk.length;
      var isFullChunk = chunkSize === 500;
-      var response = sql.unsafe(isFullChunk ? sqlQuery : makeInsertValuesSetSql(pgSchema, table, itemSchema, chunkSize), query.convertOrThrow(chunk), {prepare: true});
+      var response = sql.unsafe(isFullChunk ? data.query : makeInsertValuesSetQuery(pgSchema, table, itemSchema, chunkSize), data.convertOrThrow(chunk), {prepare: true});
      responses.push(response);
    });
    await Promise.all(responses);
@@ -249,28 +270,61 @@ async function setOrThrow(sql, items, table, itemSchema, pgSchema) {
   }
 }
 
+function setEntityHistoryOrThrow(sql, entityHistory, rows, shouldCopyCurrentEntity, shouldRemoveInvalidUtf8Opt) {
+  var shouldRemoveInvalidUtf8 = shouldRemoveInvalidUtf8Opt !== undefined ? shouldRemoveInvalidUtf8Opt : false;
+  return Promise.all(Belt_Array.map(rows, (function (historyRow) {
+    var row = S$RescriptSchema.reverseConvertToJsonOrThrow(historyRow, entityHistory.schema);
+    if (shouldRemoveInvalidUtf8) {
+      removeInvalidUtf8InPlace([row]);
+    }
+    return entityHistory.insertFn(sql, row, shouldCopyCurrentEntity !== undefined ? shouldCopyCurrentEntity : !Belt_Option.getWithDefault(historyRow.containsRollbackDiffChange, false));
+  })));
+}
+
+function makeSchemaTableNamesQuery(pgSchema) {
+  return "SELECT table_name FROM information_schema.tables WHERE table_schema = '" + pgSchema + "';";
+}
+
 function make(sql, pgSchema, pgUser) {
   var isInitialized = async function () {
-    var schemas = await sql.unsafe("SELECT schema_name FROM information_schema.schemata WHERE schema_name = '" + pgSchema + "';");
-    return Utils.$$Array.notEmpty(schemas);
+    var envioTables = await sql.unsafe("SELECT table_schema FROM information_schema.tables WHERE table_schema = '" + pgSchema + "' AND table_name = '" + eventSyncStateTableName + "';");
+    return Utils.$$Array.notEmpty(envioTables);
   };
-  var initialize = async function (entitiesOpt, generalTablesOpt, enumsOpt, cleanRunOpt) {
+  var initialize = async function (entitiesOpt, generalTablesOpt, enumsOpt) {
     var entities = entitiesOpt !== undefined ? entitiesOpt : [];
     var generalTables = generalTablesOpt !== undefined ? generalTablesOpt : [];
     var enums = enumsOpt !== undefined ? enumsOpt : [];
-    var cleanRun = cleanRunOpt !== undefined ? cleanRunOpt : false;
-    var queries = makeInitializeTransaction(pgSchema, pgUser, generalTables, entities, enums, cleanRun);
+    var schemaTableNames = await sql.unsafe(makeSchemaTableNamesQuery(pgSchema));
+    if (Utils.$$Array.notEmpty(schemaTableNames) && !schemaTableNames.some(function (table) {
+        return table.table_name === eventSyncStateTableName;
+      })) {
+      Js_exn.raiseError("Cannot run Envio migrations on PostgreSQL schema \"" + pgSchema + "\" because it contains non-Envio tables. Running migrations would delete all data in this schema.\n\nTo resolve this:\n1. If you want to use this schema, first backup any important data, then drop it with: \"pnpm envio local db-migrate down\"\n2. Or specify a different schema name by setting the \"ENVIO_PG_PUBLIC_SCHEMA\" environment variable\n3. Or manually drop the schema in your database if you're certain the data is not needed.");
+    }
+    var queries = makeInitializeTransaction(pgSchema, pgUser, generalTables, entities, enums);
     await sql.begin(function (sql) {
       return queries.map(function (query) {
         return sql.unsafe(query);
       });
    });
  };
+  var loadEffectCaches = async function () {
+    var schemaTableNames = await sql.unsafe(makeSchemaTableNamesQuery(pgSchema));
+    return Belt_Array.keepMapU(schemaTableNames, (function (schemaTableName) {
+      if (schemaTableName.table_name.startsWith("effect_cache_")) {
+        return {
+          name: schemaTableName.table_name,
+          size: 0,
+          table: undefined
+        };
+      }
+
+    }));
+  };
   var loadByIdsOrThrow = async function (ids, table, rowsSchema) {
     var rows;
     try {
       rows = await (
-        ids.length !== 1 ? sql.unsafe(makeLoadByIdsSql(pgSchema, table.tableName), [ids], {prepare: true}) : sql.unsafe(makeLoadByIdSql(pgSchema, table.tableName), ids, {prepare: true})
+        ids.length !== 1 ? sql.unsafe(makeLoadByIdsQuery(pgSchema, table.tableName), [ids], {prepare: true}) : sql.unsafe(makeLoadByIdQuery(pgSchema, table.tableName), ids, {prepare: true})
       );
     }
     catch (raw_exn){
@@ -295,32 +349,81 @@ function make(sql, pgSchema, pgUser) {
       };
     }
   };
+  var loadByFieldOrThrow = async function (fieldName, fieldSchema, fieldValue, operator, table, rowsSchema) {
+    var params;
+    try {
+      params = [S$RescriptSchema.reverseConvertToJsonOrThrow(fieldValue, fieldSchema)];
+    }
+    catch (raw_exn){
+      var exn = Caml_js_exceptions.internalToOCamlException(raw_exn);
+      throw {
+        RE_EXN_ID: Persistence.StorageError,
+        message: "Failed loading \"" + table.tableName + "\" from storage by field \"" + fieldName + "\". Couldn't serialize provided value.",
+        reason: exn,
+        Error: new Error()
+      };
+    }
+    var rows;
+    try {
+      rows = await sql.unsafe(makeLoadByFieldQuery(pgSchema, table.tableName, fieldName, operator), params, {prepare: true});
+    }
+    catch (raw_exn$1){
+      var exn$1 = Caml_js_exceptions.internalToOCamlException(raw_exn$1);
+      throw {
+        RE_EXN_ID: Persistence.StorageError,
+        message: "Failed loading \"" + table.tableName + "\" from storage by field \"" + fieldName + "\"",
+        reason: exn$1,
+        Error: new Error()
+      };
+    }
+    try {
+      return S$RescriptSchema.parseOrThrow(rows, rowsSchema);
+    }
+    catch (raw_exn$2){
+      var exn$2 = Caml_js_exceptions.internalToOCamlException(raw_exn$2);
+      throw {
+        RE_EXN_ID: Persistence.StorageError,
+        message: "Failed to parse \"" + table.tableName + "\" loaded from storage by ids",
+        reason: exn$2,
+        Error: new Error()
+      };
+    }
+  };
   var setOrThrow$1 = function (items, table, itemSchema) {
     return setOrThrow(sql, items, table, itemSchema, pgSchema);
   };
   return {
     isInitialized: isInitialized,
     initialize: initialize,
+    loadEffectCaches: loadEffectCaches,
     loadByIdsOrThrow: loadByIdsOrThrow,
+    loadByFieldOrThrow: loadByFieldOrThrow,
     setOrThrow: setOrThrow$1
   };
 }
 
 var maxItemsPerQuery = 500;
 
-exports.makeCreateIndexSql = makeCreateIndexSql;
-exports.makeCreateTableIndicesSql = makeCreateTableIndicesSql;
-exports.makeCreateTableSql = makeCreateTableSql;
+exports.makeCreateIndexQuery = makeCreateIndexQuery;
+exports.makeCreateTableIndicesQuery = makeCreateTableIndicesQuery;
+exports.makeCreateTableQuery = makeCreateTableQuery;
 exports.makeInitializeTransaction = makeInitializeTransaction;
-exports.makeLoadByIdSql = makeLoadByIdSql;
-exports.makeLoadByIdsSql = makeLoadByIdsSql;
-exports.makeInsertUnnestSetSql = makeInsertUnnestSetSql;
-exports.makeInsertValuesSetSql = makeInsertValuesSetSql;
+exports.makeLoadByIdQuery = makeLoadByIdQuery;
+exports.makeLoadByFieldQuery = makeLoadByFieldQuery;
+exports.makeLoadByIdsQuery = makeLoadByIdsQuery;
+exports.makeInsertUnnestSetQuery = makeInsertUnnestSetQuery;
+exports.makeInsertValuesSetQuery = makeInsertValuesSetQuery;
 exports.rawEventsTableName = rawEventsTableName;
+exports.eventSyncStateTableName = eventSyncStateTableName;
 exports.maxItemsPerQuery = maxItemsPerQuery;
 exports.makeTableBatchSetQuery = makeTableBatchSetQuery;
 exports.chunkArray = chunkArray;
+exports.removeInvalidUtf8InPlace = removeInvalidUtf8InPlace;
+exports.pgEncodingErrorSchema = pgEncodingErrorSchema;
+exports.PgEncodingError = PgEncodingError;
 exports.setQueryCache = setQueryCache;
 exports.setOrThrow = setOrThrow;
+exports.setEntityHistoryOrThrow = setEntityHistoryOrThrow;
+exports.makeSchemaTableNamesQuery = makeSchemaTableNamesQuery;
 exports.make = make;
-/* setQueryCache Not a pure module */
+/* pgEncodingErrorSchema Not a pure module */
package/src/Utils.res CHANGED
@@ -301,6 +301,23 @@ module String = {
     str->Js.String2.slice(~from=0, ~to_=1)->Js.String.toUpperCase ++
     str->Js.String2.sliceToEnd(~from=1)
   }
+
+  /**
+  `replaceAll(str, substr, newSubstr)` returns a new `string` which is
+  identical to `str` except with all matching instances of `substr` replaced
+  by `newSubstr`. `substr` is treated as a verbatim string to match, not a
+  regular expression.
+  See [`String.replaceAll`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/replaceAll) on MDN.
+
+  ## Examples
+
+  ```rescript
+  String.replaceAll("old old string", "old", "new") == "new new string"
+  String.replaceAll("the cat and the dog", "the", "this") == "this cat and this dog"
+  ```
+  */
+  @send
+  external replaceAll: (string, string, string) => string = "replaceAll"
 }
 
 module Result = {
package/src/db/EntityHistory.res CHANGED
@@ -148,28 +148,6 @@ type t<'entity> = {
   insertFn: (Postgres.sql, Js.Json.t, ~shouldCopyCurrentEntity: bool) => promise<unit>,
 }
 
-let insertRow = (
-  self: t<'entity>,
-  ~sql,
-  ~historyRow: historyRow<'entity>,
-  ~shouldCopyCurrentEntity,
-) => {
-  let row = historyRow->S.reverseConvertToJsonOrThrow(self.schema)
-  self.insertFn(sql, row, ~shouldCopyCurrentEntity)
-}
-
-let batchInsertRows = (self: t<'entity>, ~sql, ~rows: array<historyRow<'entity>>) => {
-  rows
-  ->Belt.Array.map(historyRow => {
-    let containsRollbackDiffChange =
-      historyRow.containsRollbackDiffChange->Belt.Option.getWithDefault(false)
-    let shouldCopyCurrentEntity = !containsRollbackDiffChange
-    self->insertRow(~sql, ~historyRow, ~shouldCopyCurrentEntity)
-  })
-  ->Promise.all
-  ->Promise.thenResolve(_ => ())
-}
-
 
 type entityInternal
 
 external castInternal: t<'entity> => t<entityInternal> = "%identity"
package/src/db/EntityHistory.res.js CHANGED
@@ -4,7 +4,6 @@
 var Table = require("./Table.res.js");
 var Js_exn = require("rescript/lib/js/js_exn.js");
 var Belt_Array = require("rescript/lib/js/belt_Array.js");
-var Belt_Option = require("rescript/lib/js/belt_Option.js");
 var S$RescriptSchema = require("rescript-schema/src/S.res.js");
 
 var variants = [
@@ -158,21 +157,6 @@ function makeHistoryRowSchema(entitySchema) {
   }));
 }
 
-function insertRow(self, sql, historyRow, shouldCopyCurrentEntity) {
-  var row = S$RescriptSchema.reverseConvertToJsonOrThrow(historyRow, self.schema);
-  return self.insertFn(sql, row, shouldCopyCurrentEntity);
-}
-
-function batchInsertRows(self, sql, rows) {
-  return Promise.all(Belt_Array.map(rows, (function (historyRow) {
-    var containsRollbackDiffChange = Belt_Option.getWithDefault(historyRow.containsRollbackDiffChange, false);
-    var shouldCopyCurrentEntity = !containsRollbackDiffChange;
-    return insertRow(self, sql, historyRow, shouldCopyCurrentEntity);
-  }))).then(function (param) {
-
-  });
-}
-
 function fromTable(table, pgSchema, schema) {
   var currentChangeFieldNames = [
     "entity_history_block_timestamp",
@@ -300,7 +284,5 @@ exports.entityIdOnlySchema = entityIdOnlySchema;
 exports.previousHistoryFieldsSchema = previousHistoryFieldsSchema;
 exports.currentHistoryFieldsSchema = currentHistoryFieldsSchema;
 exports.makeHistoryRowSchema = makeHistoryRowSchema;
-exports.insertRow = insertRow;
-exports.batchInsertRows = batchInsertRows;
 exports.fromTable = fromTable;
 /* schema Not a pure module */
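Note that `insertRow` and `batchInsertRows` are not gone, just consolidated: `PgStorage.setEntityHistoryOrThrow` above now owns the loop, and the `shouldCopyCurrentEntity` default reduces to a one-liner. A sketch of that default rule:

```ts
// Sketch of the default now applied in PgStorage.setEntityHistoryOrThrow:
// an explicit caller choice wins; otherwise rows that contain a rollback
// diff change skip copying the current entity.
function resolveShouldCopyCurrentEntity(
  explicit: boolean | undefined,
  containsRollbackDiffChange: boolean | undefined,
): boolean {
  return explicit ?? !(containsRollbackDiffChange ?? false);
}
```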
package/src/db/Table.res CHANGED
@@ -185,7 +185,7 @@ type sqlParams<'entity> = {
   hasArrayField: bool,
 }
 
-let toSqlParams = (table: table, ~schema) => {
+let toSqlParams = (table: table, ~schema, ~pgSchema) => {
   let quotedFieldNames = []
   let quotedNonPrimaryFieldNames = []
   let arrayFieldTypes = []
@@ -240,7 +240,7 @@ let toSqlParams = (table: table, ~schema) => {
     switch field {
     | Field(f) =>
       switch f.fieldType {
-      | Custom(fieldType) => `${(Text :> string)}[]::${(fieldType :> string)}`
+      | Custom(fieldType) => `${(Text :> string)}[]::"${pgSchema}".${(fieldType :> string)}`
      | Boolean => `${(Integer :> string)}[]::${(f.fieldType :> string)}`
      | fieldType => (fieldType :> string)
      }
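The extra `~pgSchema` argument surfaces in the generated cast for enum array fields: instead of casting to a bare type name, the cast is schema-qualified, so unnest inserts still resolve when the enum type lives outside the connection's search_path. Illustrated with a hypothetical enum name:

```ts
// Hypothetical enum type "entity_status" in schema "public".
// Before (2.24.0): relied on the enum being on the search_path.
const before = `TEXT[]::entity_status`;
// After (2.25.1): schema-qualified, as built by toSqlParams(~pgSchema).
const after = `TEXT[]::"public".entity_status`;
```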
package/src/db/Table.res.js CHANGED
@@ -183,7 +183,7 @@ function getUnfilteredCompositeIndicesUnsafe(table) {
   }));
 }
 
-function toSqlParams(table, schema) {
+function toSqlParams(table, schema, pgSchema) {
   var quotedFieldNames = [];
   var quotedNonPrimaryFieldNames = [];
   var arrayFieldTypes = [];
@@ -261,7 +261,7 @@ function toSqlParams(table, schema) {
       var fieldType = f.fieldType;
       tmp = fieldType === "TIMESTAMP" || fieldType === "TIMESTAMP WITH TIME ZONE" || fieldType === "JSONB" || fieldType === "SERIAL" || fieldType === "TEXT" || fieldType === "DOUBLE PRECISION" || fieldType === "NUMERIC" || fieldType === "BOOLEAN" || fieldType === "INTEGER" || fieldType === "TIMESTAMP WITH TIME ZONE NULL" ? (
         fieldType === "BOOLEAN" ? "INTEGER[]::" + f.fieldType : fieldType
-      ) : "TEXT[]::" + fieldType;
+      ) : "TEXT[]::\"" + pgSchema + "\"." + fieldType;
     } else {
       tmp = "TEXT";
     }