envio 2.25.0 → 2.25.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/index.d.ts CHANGED
@@ -105,6 +105,8 @@ export function experimental_createEffect<
  readonly input: IS;
  /** The output schema of the effect. */
  readonly output: OS;
+ /** Whether the effect should be cached. */
+ readonly cache?: boolean;
  },
  handler: (args: EffectArgs<I>) => Promise<R>
  ): Effect<I, O>;
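The new `cache` option is the only public API surface change in this release. A minimal sketch of opting into it from an indexer (hedged: the `getMetadata` effect, its schemas, and the handler body are illustrative and not part of the package; importing `S` from `envio` follows the effect docs):

```typescript
import { experimental_createEffect, S } from "envio";

// Hypothetical cached effect: with `cache: true`, results can be persisted
// to an effect cache table and reused instead of re-running the handler.
export const getMetadata = experimental_createEffect(
  {
    name: "getMetadata",
    input: S.string,
    output: S.schema({ name: S.string, decimals: S.number }),
    cache: true, // new in 2.25.x; omitting it means false
  },
  async ({ input }) => {
    // ...fetch metadata for `input` from an external source...
    return { name: "Token", decimals: 18 };
  }
);
```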
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "envio",
- "version": "v2.25.0",
+ "version": "v2.25.2",
  "description": "A latency and sync speed optimized, developer friendly blockchain data indexer.",
  "bin": "./bin.js",
  "main": "./index.js",
@@ -25,10 +25,10 @@
  },
  "homepage": "https://envio.dev",
  "optionalDependencies": {
- "envio-linux-x64": "v2.25.0",
- "envio-linux-arm64": "v2.25.0",
- "envio-darwin-x64": "v2.25.0",
- "envio-darwin-arm64": "v2.25.0"
+ "envio-linux-x64": "v2.25.2",
+ "envio-linux-arm64": "v2.25.2",
+ "envio-darwin-x64": "v2.25.2",
+ "envio-darwin-arm64": "v2.25.2"
  },
  "dependencies": {
  "@envio-dev/hypersync-client": "0.6.5",
package/src/Envio.gen.ts CHANGED
@@ -21,7 +21,9 @@ export type effectOptions<input,output> = {
  /** The input schema of the effect. */
  readonly input: RescriptSchema_S_t<input>;
  /** The output schema of the effect. */
- readonly output: RescriptSchema_S_t<output>
+ readonly output: RescriptSchema_S_t<output>;
+ /** Whether the effect should be cached. */
+ readonly cache?: boolean
  };

  export type effectContext = $$effectContext;
package/src/Envio.res CHANGED
@@ -22,6 +22,8 @@ and effectOptions<'input, 'output> = {
  input: S.t<'input>,
  /** The output schema of the effect. */
  output: S.t<'output>,
+ /** Whether the effect should be cached. */
+ cache?: bool,
  }
  @genType.import(("./Types.ts", "EffectContext"))
  and effectContext = {
@@ -48,5 +50,6 @@ let experimental_createEffect = (
  >
  ),
  callsCount: 0,
+ cache: options.cache->Belt.Option.getWithDefault(false),
  }->(Utils.magic: Internal.effect => effect<'input, 'output>)
  }
package/src/Envio.res.js CHANGED
@@ -2,12 +2,14 @@
  'use strict';

  var Prometheus = require("./Prometheus.res.js");
+ var Belt_Option = require("rescript/lib/js/belt_Option.js");

  function experimental_createEffect(options, handler) {
  Prometheus.EffectCallsCount.set(0, options.name);
  return {
  name: options.name,
  handler: handler,
+ cache: Belt_Option.getWithDefault(options.cache, false),
  callsCount: 0
  };
  }
package/src/Internal.res CHANGED
@@ -197,6 +197,7 @@ type effectArgs = {
  type effect = {
  name: string,
  handler: effectArgs => promise<effectOutput>,
+ cache: bool,
  mutable callsCount: int,
  }

package/src/Persistence.res CHANGED
@@ -5,6 +5,18 @@
  // Currently there are quite many code spread across
  // DbFunctions, Db, Migrations, InMemoryStore modules which use codegen code directly.

+ // The type reflects an effect cache table in the db
+ // It might be present even if the effect is not used in the application
+ type effectCache = {
+ name: string,
+ // Number of rows in the table
+ mutable size: int,
+ // Lazily attached table definition when effect is used in the application
+ mutable table: option<Table.table>,
+ }
+
+ type operator = [#">" | #"="]
+
  type storage = {
  // Should return true if we already have persisted data
  // and we can skip initialization
@@ -15,9 +27,8 @@ type storage = {
  ~entities: array<Internal.entityConfig>=?,
  ~generalTables: array<Table.table>=?,
  ~enums: array<Internal.enumConfig<Internal.enum>>=?,
- // If true, the storage should clear existing data
- ~cleanRun: bool=?,
  ) => promise<unit>,
+ loadEffectCaches: unit => promise<array<effectCache>>,
  @raises("StorageError")
  loadByIdsOrThrow: 'item. (
  ~ids: array<string>,
@@ -25,6 +36,15 @@ type storage = {
  ~rowsSchema: S.t<array<'item>>,
  ) => promise<array<'item>>,
  @raises("StorageError")
+ loadByFieldOrThrow: 'item 'value. (
+ ~fieldName: string,
+ ~fieldSchema: S.t<'value>,
+ ~fieldValue: 'value,
+ ~operator: operator,
+ ~table: Table.table,
+ ~rowsSchema: S.t<array<'item>>,
+ ) => promise<array<'item>>,
+ @raises("StorageError")
  setOrThrow: 'item. (
  ~items: array<'item>,
  ~table: Table.table,
@@ -37,7 +57,7 @@ exception StorageError({message: string, reason: exn})
  type storageStatus =
  | Unknown
  | Initializing(promise<unit>)
- | Ready({cleanRun: bool})
+ | Ready({cleanRun: bool, effectCaches: dict<effectCache>})

  type t = {
  userEntities: array<Internal.entityConfig>,
@@ -79,14 +99,7 @@ let make = (
  }
  }

- let init = async (
- persistence,
- // There are not much sense in the option,
- // but this is how the runUpMigration used to work
- // and we want to keep the upsert behavior without breaking changes.
- ~skipIsInitializedCheck=false,
- ~reset=false,
- ) => {
+ let init = async (persistence, ~reset=false) => {
  try {
  let shouldRun = switch persistence.storageStatus {
  | Unknown => true
@@ -102,20 +115,37 @@ let init = async (
  resolveRef := resolve
  })
  persistence.storageStatus = Initializing(promise)
- if !(reset || skipIsInitializedCheck) && (await persistence.storage.isInitialized()) {
- persistence.storageStatus = Ready({cleanRun: false})
- } else {
+ if reset || !(await persistence.storage.isInitialized()) {
  let _ = await persistence.storage.initialize(
  ~entities=persistence.allEntities,
  ~generalTables=persistence.staticTables,
  ~enums=persistence.allEnums,
- ~cleanRun=reset || !skipIsInitializedCheck,
  )
- persistence.storageStatus = Ready({cleanRun: true})
+
+ persistence.storageStatus = Ready({
+ cleanRun: true,
+ effectCaches: Js.Dict.empty(),
+ })
  switch persistence.onStorageInitialize {
  | Some(onStorageInitialize) => await onStorageInitialize()
  | None => ()
  }
+ } else if (
+ // In case of a race condition,
+ // we want to set the initial status to Ready only once.
+ switch persistence.storageStatus {
+ | Initializing(_) => true
+ | _ => false
+ }
+ ) {
+ let effectCaches = Js.Dict.empty()
+ (await persistence.storage.loadEffectCaches())->Js.Array2.forEach(effectCache => {
+ effectCaches->Js.Dict.set(effectCache.name, effectCache)
+ })
+ persistence.storageStatus = Ready({
+ cleanRun: false,
+ effectCaches,
+ })
  }
  resolveRef.contents()
  }
package/src/Persistence.res.js CHANGED
@@ -36,8 +36,7 @@ function make(userEntities, dcRegistryEntityConfig, allEnums, staticTables, stor
  };
  }

- async function init(persistence, skipIsInitializedCheckOpt, resetOpt) {
- var skipIsInitializedCheck = skipIsInitializedCheckOpt !== undefined ? skipIsInitializedCheckOpt : false;
+ async function init(persistence, resetOpt) {
  var reset = resetOpt !== undefined ? resetOpt : false;
  try {
  var promise = persistence.storageStatus;
@@ -63,22 +62,34 @@ async function init(persistence, skipIsInitializedCheckOpt, resetOpt) {
  TAG: "Initializing",
  _0: promise$1
  };
- if (!(reset || skipIsInitializedCheck) && await persistence.storage.isInitialized()) {
+ if (reset || !await persistence.storage.isInitialized()) {
+ await persistence.storage.initialize(persistence.allEntities, persistence.staticTables, persistence.allEnums);
  persistence.storageStatus = {
  TAG: "Ready",
- cleanRun: false
- };
- } else {
- await persistence.storage.initialize(persistence.allEntities, persistence.staticTables, persistence.allEnums, reset || !skipIsInitializedCheck);
- persistence.storageStatus = {
- TAG: "Ready",
- cleanRun: true
+ cleanRun: true,
+ effectCaches: {}
  };
  var onStorageInitialize = persistence.onStorageInitialize;
  if (onStorageInitialize !== undefined) {
  await onStorageInitialize();
  }

+ } else {
+ var match = persistence.storageStatus;
+ var tmp;
+ tmp = typeof match !== "object" || match.TAG !== "Initializing" ? false : true;
+ if (tmp) {
+ var effectCaches = {};
+ (await persistence.storage.loadEffectCaches()).forEach(function (effectCache) {
+ effectCaches[effectCache.name] = effectCache;
+ });
+ persistence.storageStatus = {
+ TAG: "Ready",
+ cleanRun: false,
+ effectCaches: effectCaches
+ };
+ }
+
  }
  return resolveRef.contents();
  }
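Taken together, the Persistence changes replace the `skipIsInitializedCheck`/`cleanRun` flow with a simpler contract: storage is either initialized from scratch (a clean run with an empty effect-cache dict) or existing effect cache tables are discovered via the new `loadEffectCaches`. A rough TypeScript rendering of the updated storage interface (illustrative only; the authoritative types are the ReScript ones above, with entity/table/schema types collapsed to `unknown` here):

```typescript
type Operator = ">" | "=";

interface EffectCache {
  name: string;     // cache table name, e.g. "effect_cache_<effectName>"
  size: number;     // number of rows in the table
  table?: unknown;  // table definition, lazily attached when the effect is used
}

interface Storage {
  isInitialized(): Promise<boolean>;
  // `cleanRun` is gone: initialize now always rebuilds the schema
  initialize(opts?: {
    entities?: unknown[];
    generalTables?: unknown[];
    enums?: unknown[];
  }): Promise<void>;
  // New: discover pre-existing effect cache tables on startup
  loadEffectCaches(): Promise<EffectCache[]>;
  loadByIdsOrThrow(opts: {
    ids: string[];
    table: unknown;
    rowsSchema: unknown;
  }): Promise<unknown[]>;
  // New: load rows matching `"<fieldName>" <operator> $1`
  loadByFieldOrThrow(opts: {
    fieldName: string;
    fieldSchema: unknown;
    fieldValue: unknown;
    operator: Operator;
    table: unknown;
    rowsSchema: unknown;
  }): Promise<unknown[]>;
  setOrThrow(opts: {
    items: unknown[];
    table: unknown;
    itemSchema: unknown;
  }): Promise<void>;
}
```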
package/src/PgStorage.res CHANGED
@@ -1,16 +1,16 @@
- let makeCreateIndexSql = (~tableName, ~indexFields, ~pgSchema) => {
+ let makeCreateIndexQuery = (~tableName, ~indexFields, ~pgSchema) => {
  let indexName = tableName ++ "_" ++ indexFields->Js.Array2.joinWith("_")
  let index = indexFields->Belt.Array.map(idx => `"${idx}"`)->Js.Array2.joinWith(", ")
  `CREATE INDEX IF NOT EXISTS "${indexName}" ON "${pgSchema}"."${tableName}"(${index});`
  }

- let makeCreateTableIndicesSql = (table: Table.table, ~pgSchema) => {
+ let makeCreateTableIndicesQuery = (table: Table.table, ~pgSchema) => {
  open Belt
  let tableName = table.tableName
  let createIndex = indexField =>
- makeCreateIndexSql(~tableName, ~indexFields=[indexField], ~pgSchema)
+ makeCreateIndexQuery(~tableName, ~indexFields=[indexField], ~pgSchema)
  let createCompositeIndex = indexFields => {
- makeCreateIndexSql(~tableName, ~indexFields, ~pgSchema)
+ makeCreateIndexQuery(~tableName, ~indexFields, ~pgSchema)
  }

  let singleIndices = table->Table.getSingleIndices
@@ -20,7 +20,7 @@ let makeCreateTableIndicesSql = (table: Table.table, ~pgSchema) => {
  compositeIndices->Array.map(createCompositeIndex)->Js.Array2.joinWith("\n")
  }

- let makeCreateTableSql = (table: Table.table, ~pgSchema) => {
+ let makeCreateTableQuery = (table: Table.table, ~pgSchema) => {
  open Belt
  let fieldsMapped =
  table
@@ -58,7 +58,7 @@ let makeInitializeTransaction = (
  ~generalTables=[],
  ~entities=[],
  ~enums=[],
- ~cleanRun=false,
+ ~reuseExistingPgSchema=false,
  ) => {
  let allTables = generalTables->Array.copy
  let allEntityTables = []
@@ -71,10 +71,13 @@

  let query = ref(
  (
- cleanRun
- ? `DROP SCHEMA IF EXISTS "${pgSchema}" CASCADE;
- CREATE SCHEMA "${pgSchema}";`
- : `CREATE SCHEMA IF NOT EXISTS "${pgSchema}";`
+ reuseExistingPgSchema
+ // Hosted Service already have a DB with the created public schema
+ // It also doesn't allow to simply drop it,
+ // so we reuse an existing schema when it's empty (our case)
+ ? ""
+ : `DROP SCHEMA IF EXISTS "${pgSchema}" CASCADE;
+ CREATE SCHEMA "${pgSchema}";\n`
  ) ++
  `GRANT ALL ON SCHEMA "${pgSchema}" TO "${pgUser}";
  GRANT ALL ON SCHEMA "${pgSchema}" TO public;`,
@@ -87,31 +90,17 @@ GRANT ALL ON SCHEMA "${pgSchema}" TO public;`,
  ->Js.Array2.map(v => `'${v->(Utils.magic: Internal.enum => string)}'`)
  ->Js.Array2.joinWith(", ")});`

- query :=
- query.contents ++
- "\n" ++ if cleanRun {
- // Direct creation when cleanRunting (faster)
- enumCreateQuery
- } else {
- // Wrap with conditional check only when not cleanRunting
- `IF NOT EXISTS (
- SELECT 1 FROM pg_type
- WHERE typname = '${enumConfig.name->Js.String2.toLowerCase}'
- AND typnamespace = (SELECT oid FROM pg_namespace WHERE nspname = '${pgSchema}')
- ) THEN
- ${enumCreateQuery}
- END IF;`
- }
+ query := query.contents ++ "\n" ++ enumCreateQuery
  })

  // Batch all table creation first (optimal for PostgreSQL)
  allTables->Js.Array2.forEach((table: Table.table) => {
- query := query.contents ++ "\n" ++ makeCreateTableSql(table, ~pgSchema)
+ query := query.contents ++ "\n" ++ makeCreateTableQuery(table, ~pgSchema)
  })

  // Then batch all indices (better performance when tables exist)
  allTables->Js.Array2.forEach((table: Table.table) => {
- let indices = makeCreateTableIndicesSql(table, ~pgSchema)
+ let indices = makeCreateTableIndicesQuery(table, ~pgSchema)
  if indices !== "" {
  query := query.contents ++ "\n" ++ indices
  }
@@ -131,7 +120,7 @@ END IF;`
  query :=
  query.contents ++
  "\n" ++
- makeCreateIndexSql(
+ makeCreateIndexQuery(
  ~tableName=derivedFromField.derivedFromEntity,
  ~indexFields=[indexField],
  ~pgSchema,
@@ -139,28 +128,26 @@ END IF;`
  })
  })

- [
- // Return optimized queries - main DDL in DO block, functions separate
- // Note: DO $$ BEGIN wrapper is only needed for PL/pgSQL conditionals (IF NOT EXISTS)
- // Reset case uses direct DDL (faster), non-cleanRun case uses conditionals (safer)
- cleanRun || enums->Utils.Array.isEmpty
- ? query.contents
- : `DO $$ BEGIN ${query.contents} END $$;`,
- // Functions query (separate as they can't be in DO block)
- ]->Js.Array2.concat(functionsQuery.contents !== "" ? [functionsQuery.contents] : [])
+ [query.contents]->Js.Array2.concat(
+ functionsQuery.contents !== "" ? [functionsQuery.contents] : [],
+ )
  }

- let makeLoadByIdSql = (~pgSchema, ~tableName) => {
+ let makeLoadByIdQuery = (~pgSchema, ~tableName) => {
  `SELECT * FROM "${pgSchema}"."${tableName}" WHERE id = $1 LIMIT 1;`
  }

- let makeLoadByIdsSql = (~pgSchema, ~tableName) => {
+ let makeLoadByFieldQuery = (~pgSchema, ~tableName, ~fieldName, ~operator) => {
+ `SELECT * FROM "${pgSchema}"."${tableName}" WHERE "${fieldName}" ${operator} $1;`
+ }
+
+ let makeLoadByIdsQuery = (~pgSchema, ~tableName) => {
  `SELECT * FROM "${pgSchema}"."${tableName}" WHERE id = ANY($1::text[]);`
  }

- let makeInsertUnnestSetSql = (~pgSchema, ~table: Table.table, ~itemSchema, ~isRawEvents) => {
+ let makeInsertUnnestSetQuery = (~pgSchema, ~table: Table.table, ~itemSchema, ~isRawEvents) => {
  let {quotedFieldNames, quotedNonPrimaryFieldNames, arrayFieldTypes} =
- table->Table.toSqlParams(~schema=itemSchema)
+ table->Table.toSqlParams(~schema=itemSchema, ~pgSchema)

  let primaryKeyFieldNames = Table.getPrimaryKeyFieldNames(table)

@@ -188,8 +175,9 @@ SELECT * FROM unnest(${arrayFieldTypes
  } ++ ";"
  }

- let makeInsertValuesSetSql = (~pgSchema, ~table: Table.table, ~itemSchema, ~itemsCount) => {
- let {quotedFieldNames, quotedNonPrimaryFieldNames} = table->Table.toSqlParams(~schema=itemSchema)
+ let makeInsertValuesSetQuery = (~pgSchema, ~table: Table.table, ~itemSchema, ~itemsCount) => {
+ let {quotedFieldNames, quotedNonPrimaryFieldNames} =
+ table->Table.toSqlParams(~schema=itemSchema, ~pgSchema)

  let primaryKeyFieldNames = Table.getPrimaryKeyFieldNames(table)
  let fieldsCount = quotedFieldNames->Array.length
@@ -236,12 +224,13 @@ VALUES${placeholders.contents}` ++
  // they are always guaranteed to be an object.
  // FIXME what about Fuel params?
  let rawEventsTableName = "raw_events"
+ let eventSyncStateTableName = "event_sync_state"

  // Constants for chunking
  let maxItemsPerQuery = 500

  let makeTableBatchSetQuery = (~pgSchema, ~table: Table.table, ~itemSchema: S.t<'item>) => {
- let {dbSchema, hasArrayField} = table->Table.toSqlParams(~schema=itemSchema)
+ let {dbSchema, hasArrayField} = table->Table.toSqlParams(~schema=itemSchema, ~pgSchema)
  let isRawEvents = table.tableName === rawEventsTableName

  // Should experiment how much it'll affect performance
@@ -253,7 +242,7 @@ let makeTableBatchSetQuery = (~pgSchema, ~table: Table.table, ~itemSchema: S.t<'

  if isRawEvents || !hasArrayField {
  {
- "sql": makeInsertUnnestSetSql(~pgSchema, ~table, ~itemSchema, ~isRawEvents),
+ "query": makeInsertUnnestSetQuery(~pgSchema, ~table, ~itemSchema, ~isRawEvents),
  "convertOrThrow": S.compile(
  S.unnest(dbSchema),
  ~input=Value,
@@ -265,7 +254,12 @@
  }
  } else {
  {
- "sql": makeInsertValuesSetSql(~pgSchema, ~table, ~itemSchema, ~itemsCount=maxItemsPerQuery),
+ "query": makeInsertValuesSetQuery(
+ ~pgSchema,
+ ~table,
+ ~itemSchema,
+ ~itemsCount=maxItemsPerQuery,
+ ),
  "convertOrThrow": S.compile(
  S.unnest(itemSchema)->S.preprocess(_ => {
  serializer: Utils.Array.flatten->Utils.magic,
@@ -291,6 +285,35 @@ let chunkArray = (arr: array<'a>, ~chunkSize) => {
  chunks
  }

+ let removeInvalidUtf8InPlace = entities =>
+ entities->Js.Array2.forEach(item => {
+ let dict = item->(Utils.magic: 'a => dict<unknown>)
+ dict->Utils.Dict.forEachWithKey((key, value) => {
+ if value->Js.typeof === "string" {
+ let value = value->(Utils.magic: unknown => string)
+ // We mutate here, since we don't care
+ // about the original value with \x00 anyways.
+ //
+ // This is unsafe, but we rely that it'll use
+ // the mutated reference on retry.
+ // TODO: Test it properly after we start using
+ // in-memory PGLite for indexer test framework.
+ dict->Js.Dict.set(
+ key,
+ value
+ ->Utils.String.replaceAll("\x00", "")
+ ->(Utils.magic: string => unknown),
+ )
+ }
+ })
+ })
+
+ let pgEncodingErrorSchema = S.object(s =>
+ s.tag("message", `invalid byte sequence for encoding "UTF8": 0x00`)
+ )
+
+ exception PgEncodingError({table: Table.table})
+
  // WeakMap for caching table batch set queries
  let setQueryCache = Utils.WeakMap.make()
  let setOrThrow = async (sql, ~items, ~table: Table.table, ~itemSchema, ~pgSchema) => {
@@ -298,7 +321,7 @@ let setOrThrow = async (sql, ~items, ~table: Table.table, ~itemSchema, ~pgSchema
  ()
  } else {
  // Get or create cached query for this table
- let query = switch setQueryCache->Utils.WeakMap.get(table) {
+ let data = switch setQueryCache->Utils.WeakMap.get(table) {
  | Some(cached) => cached
  | None => {
  let newQuery = makeTableBatchSetQuery(
@@ -311,10 +334,8 @@
  }
  }

- let sqlQuery = query["sql"]
-
  try {
- if query["isInsertValues"] {
+ if data["isInsertValues"] {
  let chunks = chunkArray(items, ~chunkSize=maxItemsPerQuery)
  let responses = []
  chunks->Js.Array2.forEach(chunk => {
@@ -325,9 +346,9 @@
  // Either use the sql query for full chunks from cache
  // or create a new one for partial chunks on the fly.
  isFullChunk
- ? sqlQuery
- : makeInsertValuesSetSql(~pgSchema, ~table, ~itemSchema, ~itemsCount=chunkSize),
- query["convertOrThrow"](chunk->(Utils.magic: array<'item> => array<unknown>)),
+ ? data["query"]
+ : makeInsertValuesSetQuery(~pgSchema, ~table, ~itemSchema, ~itemsCount=chunkSize),
+ data["convertOrThrow"](chunk->(Utils.magic: array<'item> => array<unknown>)),
  )
  responses->Js.Array2.push(response)->ignore
  })
@@ -335,8 +356,8 @@
  } else {
  // Use UNNEST approach for single query
  await sql->Postgres.preparedUnsafe(
- sqlQuery,
- query["convertOrThrow"](items->(Utils.magic: array<'item> => array<unknown>)),
+ data["query"],
+ data["convertOrThrow"](items->(Utils.magic: array<'item> => array<unknown>)),
  )
  }
  } catch {
@@ -358,23 +379,81 @@
  }
  }

+ let setEntityHistoryOrThrow = (
+ sql,
+ ~entityHistory: EntityHistory.t<'entity>,
+ ~rows: array<EntityHistory.historyRow<'entity>>,
+ ~shouldCopyCurrentEntity=?,
+ ~shouldRemoveInvalidUtf8=false,
+ ) => {
+ rows
+ ->Belt.Array.map(historyRow => {
+ let row = historyRow->S.reverseConvertToJsonOrThrow(entityHistory.schema)
+ if shouldRemoveInvalidUtf8 {
+ [row]->removeInvalidUtf8InPlace
+ }
+ entityHistory.insertFn(
+ sql,
+ row,
+ ~shouldCopyCurrentEntity=switch shouldCopyCurrentEntity {
+ | Some(v) => v
+ | None => {
+ let containsRollbackDiffChange =
+ historyRow.containsRollbackDiffChange->Belt.Option.getWithDefault(false)
+ !containsRollbackDiffChange
+ }
+ },
+ )
+ })
+ ->Promise.all
+ ->(Utils.magic: promise<array<unit>> => promise<unit>)
+ }
+
+ type schemaTableName = {
+ @as("table_name")
+ tableName: string,
+ }
+
+ let makeSchemaTableNamesQuery = (~pgSchema) => {
+ `SELECT table_name FROM information_schema.tables WHERE table_schema = '${pgSchema}';`
+ }
+
  let make = (~sql: Postgres.sql, ~pgSchema, ~pgUser): Persistence.storage => {
  let isInitialized = async () => {
- let schemas =
+ let envioTables =
  await sql->Postgres.unsafe(
- `SELECT schema_name FROM information_schema.schemata WHERE schema_name = '${pgSchema}';`,
+ `SELECT table_schema FROM information_schema.tables WHERE table_schema = '${pgSchema}' AND table_name = '${eventSyncStateTableName}';`,
  )
- schemas->Utils.Array.notEmpty
+ envioTables->Utils.Array.notEmpty
  }

- let initialize = async (~entities=[], ~generalTables=[], ~enums=[], ~cleanRun=false) => {
+ let initialize = async (~entities=[], ~generalTables=[], ~enums=[]) => {
+ let schemaTableNames: array<schemaTableName> =
+ await sql->Postgres.unsafe(makeSchemaTableNamesQuery(~pgSchema))
+
+ // The initialization query will completely drop the schema and recreate it from scratch.
+ // So we need to check if the schema is not used for anything else than envio.
+ if (
+ // Should pass with existing schema with no tables
+ // This might happen when used with public schema
+ // which is automatically created by postgres.
+ schemaTableNames->Utils.Array.notEmpty &&
+ // Otherwise should throw if there's a table, but no envio specific one
+ // This means that the schema is used for something else than envio.
+ !(schemaTableNames->Js.Array2.some(table => table.tableName === eventSyncStateTableName))
+ ) {
+ Js.Exn.raiseError(
+ `Cannot run Envio migrations on PostgreSQL schema "${pgSchema}" because it contains non-Envio tables. Running migrations would delete all data in this schema.\n\nTo resolve this:\n1. If you want to use this schema, first backup any important data, then drop it with: "pnpm envio local db-migrate down"\n2. Or specify a different schema name by setting the "ENVIO_PG_PUBLIC_SCHEMA" environment variable\n3. Or manually drop the schema in your database if you're certain the data is not needed.`,
+ )
+ }
+
  let queries = makeInitializeTransaction(
  ~pgSchema,
  ~pgUser,
  ~generalTables,
  ~entities,
  ~enums,
- ~cleanRun,
+ ~reuseExistingPgSchema=schemaTableNames->Utils.Array.isEmpty,
  )
  // Execute all queries within a single transaction for integrity
  let _ = await sql->Postgres.beginSql(sql => {
@@ -382,17 +461,37 @@ let make = (~sql: Postgres.sql, ~pgSchema, ~pgUser): Persistence.storage => {
  })
  }

+ let loadEffectCaches = async () => {
+ let schemaTableNames: array<schemaTableName> =
+ await sql->Postgres.unsafe(makeSchemaTableNamesQuery(~pgSchema))
+ schemaTableNames->Belt.Array.keepMapU(schemaTableName => {
+ if schemaTableName.tableName->Js.String2.startsWith("effect_cache_") {
+ Some(
+ (
+ {
+ name: schemaTableName.tableName,
+ size: 0,
+ table: None,
+ }: Persistence.effectCache
+ ),
+ )
+ } else {
+ None
+ }
+ })
+ }
+
  let loadByIdsOrThrow = async (~ids, ~table: Table.table, ~rowsSchema) => {
  switch await (
  switch ids {
  | [_] =>
  sql->Postgres.preparedUnsafe(
- makeLoadByIdSql(~pgSchema, ~tableName=table.tableName),
+ makeLoadByIdQuery(~pgSchema, ~tableName=table.tableName),
  ids->Obj.magic,
  )
  | _ =>
  sql->Postgres.preparedUnsafe(
- makeLoadByIdsSql(~pgSchema, ~tableName=table.tableName),
+ makeLoadByIdsQuery(~pgSchema, ~tableName=table.tableName),
  [ids]->Obj.magic,
  )
  }
@@ -417,6 +516,52 @@
  }
  }

+ let loadByFieldOrThrow = async (
+ ~fieldName: string,
+ ~fieldSchema,
+ ~fieldValue,
+ ~operator: Persistence.operator,
+ ~table: Table.table,
+ ~rowsSchema,
+ ) => {
+ let params = try [fieldValue->S.reverseConvertToJsonOrThrow(fieldSchema)]->Obj.magic catch {
+ | exn =>
+ raise(
+ Persistence.StorageError({
+ message: `Failed loading "${table.tableName}" from storage by field "${fieldName}". Couldn't serialize provided value.`,
+ reason: exn,
+ }),
+ )
+ }
+ switch await sql->Postgres.preparedUnsafe(
+ makeLoadByFieldQuery(
+ ~pgSchema,
+ ~tableName=table.tableName,
+ ~fieldName,
+ ~operator=(operator :> string),
+ ),
+ params,
+ ) {
+ | exception exn =>
+ raise(
+ Persistence.StorageError({
+ message: `Failed loading "${table.tableName}" from storage by field "${fieldName}"`,
+ reason: exn,
+ }),
+ )
+ | rows =>
+ try rows->S.parseOrThrow(rowsSchema) catch {
+ | exn =>
+ raise(
+ Persistence.StorageError({
+ message: `Failed to parse "${table.tableName}" loaded from storage by ids`,
+ reason: exn,
+ }),
+ )
+ }
+ }
+ }
+
  let setOrThrow = (
  type item,
  ~items: array<item>,
@@ -435,6 +580,8 @@ let make = (~sql: Postgres.sql, ~pgSchema, ~pgUser): Persistence.storage => {
  {
  isInitialized,
  initialize,
+ loadByFieldOrThrow,
+ loadEffectCaches,
  loadByIdsOrThrow,
  setOrThrow,
  }
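The new `makeLoadByFieldQuery` backs `loadByFieldOrThrow` and only interpolates trusted identifiers; the field value itself stays a bound parameter. A hedged TypeScript sketch of the same string construction (the call-site values below are hypothetical):

```typescript
// Mirrors makeLoadByFieldQuery from PgStorage.res above.
const makeLoadByFieldQuery = (
  pgSchema: string,
  tableName: string,
  fieldName: string,
  operator: ">" | "="
): string =>
  `SELECT * FROM "${pgSchema}"."${tableName}" WHERE "${fieldName}" ${operator} $1;`;

// Hypothetical call site:
makeLoadByFieldQuery("public", "Token", "balance", ">");
// => SELECT * FROM "public"."Token" WHERE "balance" > $1;
```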
package/src/PgStorage.res.js CHANGED
@@ -4,14 +4,17 @@
  var $$Array = require("rescript/lib/js/array.js");
  var Table = require("./db/Table.res.js");
  var Utils = require("./Utils.res.js");
+ var Js_exn = require("rescript/lib/js/js_exn.js");
  var Schema = require("./db/Schema.res.js");
  var Belt_Array = require("rescript/lib/js/belt_Array.js");
+ var Belt_Option = require("rescript/lib/js/belt_Option.js");
  var Caml_option = require("rescript/lib/js/caml_option.js");
  var Persistence = require("./Persistence.res.js");
+ var Caml_exceptions = require("rescript/lib/js/caml_exceptions.js");
  var S$RescriptSchema = require("rescript-schema/src/S.res.js");
  var Caml_js_exceptions = require("rescript/lib/js/caml_js_exceptions.js");

- function makeCreateIndexSql(tableName, indexFields, pgSchema) {
+ function makeCreateIndexQuery(tableName, indexFields, pgSchema) {
  var indexName = tableName + "_" + indexFields.join("_");
  var index = Belt_Array.map(indexFields, (function (idx) {
  return "\"" + idx + "\"";
@@ -19,20 +22,20 @@ function makeCreateIndexSql(tableName, indexFields, pgSchema) {
  return "CREATE INDEX IF NOT EXISTS \"" + indexName + "\" ON \"" + pgSchema + "\".\"" + tableName + "\"(" + index + ");";
  }

- function makeCreateTableIndicesSql(table, pgSchema) {
+ function makeCreateTableIndicesQuery(table, pgSchema) {
  var tableName = table.tableName;
  var createIndex = function (indexField) {
- return makeCreateIndexSql(tableName, [indexField], pgSchema);
+ return makeCreateIndexQuery(tableName, [indexField], pgSchema);
  };
  var createCompositeIndex = function (indexFields) {
- return makeCreateIndexSql(tableName, indexFields, pgSchema);
+ return makeCreateIndexQuery(tableName, indexFields, pgSchema);
  };
  var singleIndices = Table.getSingleIndices(table);
  var compositeIndices = Table.getCompositeIndices(table);
  return Belt_Array.map(singleIndices, createIndex).join("\n") + Belt_Array.map(compositeIndices, createCompositeIndex).join("\n");
  }

- function makeCreateTableSql(table, pgSchema) {
+ function makeCreateTableQuery(table, pgSchema) {
  var fieldsMapped = Belt_Array.map(Table.getFields(table), (function (field) {
  var defaultValue = field.defaultValue;
  var fieldType = field.fieldType;
@@ -56,11 +59,11 @@ function makeCreateTableSql(table, pgSchema) {
  ) + ");";
  }

- function makeInitializeTransaction(pgSchema, pgUser, generalTablesOpt, entitiesOpt, enumsOpt, cleanRunOpt) {
+ function makeInitializeTransaction(pgSchema, pgUser, generalTablesOpt, entitiesOpt, enumsOpt, reuseExistingPgSchemaOpt) {
  var generalTables = generalTablesOpt !== undefined ? generalTablesOpt : [];
  var entities = entitiesOpt !== undefined ? entitiesOpt : [];
  var enums = enumsOpt !== undefined ? enumsOpt : [];
- var cleanRun = cleanRunOpt !== undefined ? cleanRunOpt : false;
+ var reuseExistingPgSchema = reuseExistingPgSchemaOpt !== undefined ? reuseExistingPgSchemaOpt : false;
  var allTables = $$Array.copy(generalTables);
  var allEntityTables = [];
  entities.forEach(function (entity) {
@@ -71,22 +74,20 @@ function makeInitializeTransaction(pgSchema, pgUser, generalTablesOpt, entitiesO
  var derivedSchema = Schema.make(allEntityTables);
  var query = {
  contents: (
- cleanRun ? "DROP SCHEMA IF EXISTS \"" + pgSchema + "\" CASCADE;\nCREATE SCHEMA \"" + pgSchema + "\";" : "CREATE SCHEMA IF NOT EXISTS \"" + pgSchema + "\";"
+ reuseExistingPgSchema ? "" : "DROP SCHEMA IF EXISTS \"" + pgSchema + "\" CASCADE;\nCREATE SCHEMA \"" + pgSchema + "\";\n"
  ) + ("GRANT ALL ON SCHEMA \"" + pgSchema + "\" TO \"" + pgUser + "\";\nGRANT ALL ON SCHEMA \"" + pgSchema + "\" TO public;")
  };
  enums.forEach(function (enumConfig) {
  var enumCreateQuery = "CREATE TYPE \"" + pgSchema + "\"." + enumConfig.name + " AS ENUM(" + enumConfig.variants.map(function (v) {
  return "'" + v + "'";
  }).join(", ") + ");";
- query.contents = query.contents + "\n" + (
- cleanRun ? enumCreateQuery : "IF NOT EXISTS (\n SELECT 1 FROM pg_type \n WHERE typname = '" + enumConfig.name.toLowerCase() + "' \n AND typnamespace = (SELECT oid FROM pg_namespace WHERE nspname = '" + pgSchema + "')\n) THEN \n " + enumCreateQuery + "\nEND IF;"
- );
+ query.contents = query.contents + "\n" + enumCreateQuery;
  });
  allTables.forEach(function (table) {
- query.contents = query.contents + "\n" + makeCreateTableSql(table, pgSchema);
+ query.contents = query.contents + "\n" + makeCreateTableQuery(table, pgSchema);
  });
  allTables.forEach(function (table) {
- var indices = makeCreateTableIndicesSql(table, pgSchema);
+ var indices = makeCreateTableIndicesQuery(table, pgSchema);
  if (indices !== "") {
  query.contents = query.contents + "\n" + indices;
  return ;
@@ -100,22 +101,26 @@ function makeInitializeTransaction(pgSchema, pgUser, generalTablesOpt, entitiesO
  functionsQuery.contents = functionsQuery.contents + "\n" + entity.entityHistory.createInsertFnQuery;
  Table.getDerivedFromFields(entity.table).forEach(function (derivedFromField) {
  var indexField = Utils.unwrapResultExn(Schema.getDerivedFromFieldName(derivedSchema, derivedFromField));
- query.contents = query.contents + "\n" + makeCreateIndexSql(derivedFromField.derivedFromEntity, [indexField], pgSchema);
+ query.contents = query.contents + "\n" + makeCreateIndexQuery(derivedFromField.derivedFromEntity, [indexField], pgSchema);
  });
  });
- return [cleanRun || Utils.$$Array.isEmpty(enums) ? query.contents : "DO $$ BEGIN " + query.contents + " END $$;"].concat(functionsQuery.contents !== "" ? [functionsQuery.contents] : []);
+ return [query.contents].concat(functionsQuery.contents !== "" ? [functionsQuery.contents] : []);
  }

- function makeLoadByIdSql(pgSchema, tableName) {
+ function makeLoadByIdQuery(pgSchema, tableName) {
  return "SELECT * FROM \"" + pgSchema + "\".\"" + tableName + "\" WHERE id = $1 LIMIT 1;";
  }

- function makeLoadByIdsSql(pgSchema, tableName) {
+ function makeLoadByFieldQuery(pgSchema, tableName, fieldName, operator) {
+ return "SELECT * FROM \"" + pgSchema + "\".\"" + tableName + "\" WHERE \"" + fieldName + "\" " + operator + " $1;";
+ }
+
+ function makeLoadByIdsQuery(pgSchema, tableName) {
  return "SELECT * FROM \"" + pgSchema + "\".\"" + tableName + "\" WHERE id = ANY($1::text[]);";
  }

- function makeInsertUnnestSetSql(pgSchema, table, itemSchema, isRawEvents) {
- var match = Table.toSqlParams(table, itemSchema);
+ function makeInsertUnnestSetQuery(pgSchema, table, itemSchema, isRawEvents) {
+ var match = Table.toSqlParams(table, itemSchema, pgSchema);
  var quotedNonPrimaryFieldNames = match.quotedNonPrimaryFieldNames;
  var primaryKeyFieldNames = Table.getPrimaryKeyFieldNames(table);
  return "INSERT INTO \"" + pgSchema + "\".\"" + table.tableName + "\" (" + match.quotedFieldNames.join(", ") + ")\nSELECT * FROM unnest(" + match.arrayFieldTypes.map(function (arrayFieldType, idx) {
@@ -131,8 +136,8 @@ function makeInsertUnnestSetSql(pgSchema, table, itemSchema, isRawEvents) {
  ) + ";";
  }

- function makeInsertValuesSetSql(pgSchema, table, itemSchema, itemsCount) {
- var match = Table.toSqlParams(table, itemSchema);
+ function makeInsertValuesSetQuery(pgSchema, table, itemSchema, itemsCount) {
+ var match = Table.toSqlParams(table, itemSchema, pgSchema);
  var quotedNonPrimaryFieldNames = match.quotedNonPrimaryFieldNames;
  var quotedFieldNames = match.quotedFieldNames;
  var primaryKeyFieldNames = Table.getPrimaryKeyFieldNames(table);
@@ -164,18 +169,20 @@ function makeInsertValuesSetSql(pgSchema, table, itemSchema, itemsCount) {

  var rawEventsTableName = "raw_events";

+ var eventSyncStateTableName = "event_sync_state";
+
  function makeTableBatchSetQuery(pgSchema, table, itemSchema) {
- var match = Table.toSqlParams(table, itemSchema);
+ var match = Table.toSqlParams(table, itemSchema, pgSchema);
  var isRawEvents = table.tableName === rawEventsTableName;
  if (isRawEvents || !match.hasArrayField) {
  return {
- sql: makeInsertUnnestSetSql(pgSchema, table, itemSchema, isRawEvents),
+ query: makeInsertUnnestSetQuery(pgSchema, table, itemSchema, isRawEvents),
  convertOrThrow: S$RescriptSchema.compile(S$RescriptSchema.unnest(match.dbSchema), "Output", "Input", "Sync", false),
  isInsertValues: false
  };
  } else {
  return {
- sql: makeInsertValuesSetSql(pgSchema, table, itemSchema, 500),
+ query: makeInsertValuesSetQuery(pgSchema, table, itemSchema, 500),
  convertOrThrow: S$RescriptSchema.compile(S$RescriptSchema.preprocess(S$RescriptSchema.unnest(itemSchema), (function (param) {
  return {
  s: (function (prim) {
@@ -199,6 +206,24 @@ function chunkArray(arr, chunkSize) {
  return chunks;
  }

+ function removeInvalidUtf8InPlace(entities) {
+ entities.forEach(function (item) {
+ Utils.Dict.forEachWithKey(item, (function (key, value) {
+ if (typeof value === "string") {
+ item[key] = value.replaceAll("\x00", "");
+ return ;
+ }
+
+ }));
+ });
+ }
+
+ var pgEncodingErrorSchema = S$RescriptSchema.object(function (s) {
+ s.tag("message", "invalid byte sequence for encoding \"UTF8\": 0x00");
+ });
+
+ var PgEncodingError = /* @__PURE__ */Caml_exceptions.create("PgStorage.PgEncodingError");
+
  var setQueryCache = new WeakMap();

  async function setOrThrow(sql, items, table, itemSchema, pgSchema) {
@@ -206,25 +231,24 @@ async function setOrThrow(sql, items, table, itemSchema, pgSchema) {
  return ;
  }
  var cached = setQueryCache.get(table);
- var query;
+ var data;
  if (cached !== undefined) {
- query = Caml_option.valFromOption(cached);
+ data = Caml_option.valFromOption(cached);
  } else {
  var newQuery = makeTableBatchSetQuery(pgSchema, table, itemSchema);
  setQueryCache.set(table, newQuery);
- query = newQuery;
+ data = newQuery;
  }
- var sqlQuery = query.sql;
  try {
- if (!query.isInsertValues) {
- return await sql.unsafe(sqlQuery, query.convertOrThrow(items), {prepare: true});
+ if (!data.isInsertValues) {
+ return await sql.unsafe(data.query, data.convertOrThrow(items), {prepare: true});
  }
  var chunks = chunkArray(items, 500);
  var responses = [];
  chunks.forEach(function (chunk) {
  var chunkSize = chunk.length;
  var isFullChunk = chunkSize === 500;
- var response = sql.unsafe(isFullChunk ? sqlQuery : makeInsertValuesSetSql(pgSchema, table, itemSchema, chunkSize), query.convertOrThrow(chunk), {prepare: true});
+ var response = sql.unsafe(isFullChunk ? data.query : makeInsertValuesSetQuery(pgSchema, table, itemSchema, chunkSize), data.convertOrThrow(chunk), {prepare: true});
  responses.push(response);
  });
  await Promise.all(responses);
@@ -249,28 +273,61 @@ async function setOrThrow(sql, items, table, itemSchema, pgSchema) {
  }
  }

+ function setEntityHistoryOrThrow(sql, entityHistory, rows, shouldCopyCurrentEntity, shouldRemoveInvalidUtf8Opt) {
+ var shouldRemoveInvalidUtf8 = shouldRemoveInvalidUtf8Opt !== undefined ? shouldRemoveInvalidUtf8Opt : false;
+ return Promise.all(Belt_Array.map(rows, (function (historyRow) {
+ var row = S$RescriptSchema.reverseConvertToJsonOrThrow(historyRow, entityHistory.schema);
+ if (shouldRemoveInvalidUtf8) {
+ removeInvalidUtf8InPlace([row]);
+ }
+ return entityHistory.insertFn(sql, row, shouldCopyCurrentEntity !== undefined ? shouldCopyCurrentEntity : !Belt_Option.getWithDefault(historyRow.containsRollbackDiffChange, false));
+ })));
+ }
+
+ function makeSchemaTableNamesQuery(pgSchema) {
+ return "SELECT table_name FROM information_schema.tables WHERE table_schema = '" + pgSchema + "';";
+ }
+
  function make(sql, pgSchema, pgUser) {
  var isInitialized = async function () {
- var schemas = await sql.unsafe("SELECT schema_name FROM information_schema.schemata WHERE schema_name = '" + pgSchema + "';");
- return Utils.$$Array.notEmpty(schemas);
+ var envioTables = await sql.unsafe("SELECT table_schema FROM information_schema.tables WHERE table_schema = '" + pgSchema + "' AND table_name = '" + eventSyncStateTableName + "';");
+ return Utils.$$Array.notEmpty(envioTables);
  };
- var initialize = async function (entitiesOpt, generalTablesOpt, enumsOpt, cleanRunOpt) {
+ var initialize = async function (entitiesOpt, generalTablesOpt, enumsOpt) {
  var entities = entitiesOpt !== undefined ? entitiesOpt : [];
  var generalTables = generalTablesOpt !== undefined ? generalTablesOpt : [];
  var enums = enumsOpt !== undefined ? enumsOpt : [];
- var cleanRun = cleanRunOpt !== undefined ? cleanRunOpt : false;
- var queries = makeInitializeTransaction(pgSchema, pgUser, generalTables, entities, enums, cleanRun);
+ var schemaTableNames = await sql.unsafe(makeSchemaTableNamesQuery(pgSchema));
+ if (Utils.$$Array.notEmpty(schemaTableNames) && !schemaTableNames.some(function (table) {
+ return table.table_name === eventSyncStateTableName;
+ })) {
+ Js_exn.raiseError("Cannot run Envio migrations on PostgreSQL schema \"" + pgSchema + "\" because it contains non-Envio tables. Running migrations would delete all data in this schema.\n\nTo resolve this:\n1. If you want to use this schema, first backup any important data, then drop it with: \"pnpm envio local db-migrate down\"\n2. Or specify a different schema name by setting the \"ENVIO_PG_PUBLIC_SCHEMA\" environment variable\n3. Or manually drop the schema in your database if you're certain the data is not needed.");
+ }
+ var queries = makeInitializeTransaction(pgSchema, pgUser, generalTables, entities, enums, Utils.$$Array.isEmpty(schemaTableNames));
  await sql.begin(function (sql) {
  return queries.map(function (query) {
  return sql.unsafe(query);
  });
  });
  };
+ var loadEffectCaches = async function () {
+ var schemaTableNames = await sql.unsafe(makeSchemaTableNamesQuery(pgSchema));
+ return Belt_Array.keepMapU(schemaTableNames, (function (schemaTableName) {
+ if (schemaTableName.table_name.startsWith("effect_cache_")) {
+ return {
+ name: schemaTableName.table_name,
+ size: 0,
+ table: undefined
+ };
+ }
+
+ }));
+ };
  var loadByIdsOrThrow = async function (ids, table, rowsSchema) {
  var rows;
  try {
  rows = await (
- ids.length !== 1 ? sql.unsafe(makeLoadByIdsSql(pgSchema, table.tableName), [ids], {prepare: true}) : sql.unsafe(makeLoadByIdSql(pgSchema, table.tableName), ids, {prepare: true})
+ ids.length !== 1 ? sql.unsafe(makeLoadByIdsQuery(pgSchema, table.tableName), [ids], {prepare: true}) : sql.unsafe(makeLoadByIdQuery(pgSchema, table.tableName), ids, {prepare: true})
  );
  }
  catch (raw_exn){
@@ -295,32 +352,81 @@ function make(sql, pgSchema, pgUser) {
  };
  }
  };
+ var loadByFieldOrThrow = async function (fieldName, fieldSchema, fieldValue, operator, table, rowsSchema) {
+ var params;
+ try {
+ params = [S$RescriptSchema.reverseConvertToJsonOrThrow(fieldValue, fieldSchema)];
+ }
+ catch (raw_exn){
+ var exn = Caml_js_exceptions.internalToOCamlException(raw_exn);
+ throw {
+ RE_EXN_ID: Persistence.StorageError,
+ message: "Failed loading \"" + table.tableName + "\" from storage by field \"" + fieldName + "\". Couldn't serialize provided value.",
+ reason: exn,
+ Error: new Error()
+ };
+ }
+ var rows;
+ try {
+ rows = await sql.unsafe(makeLoadByFieldQuery(pgSchema, table.tableName, fieldName, operator), params, {prepare: true});
+ }
+ catch (raw_exn$1){
+ var exn$1 = Caml_js_exceptions.internalToOCamlException(raw_exn$1);
+ throw {
+ RE_EXN_ID: Persistence.StorageError,
+ message: "Failed loading \"" + table.tableName + "\" from storage by field \"" + fieldName + "\"",
+ reason: exn$1,
+ Error: new Error()
+ };
+ }
+ try {
+ return S$RescriptSchema.parseOrThrow(rows, rowsSchema);
+ }
+ catch (raw_exn$2){
+ var exn$2 = Caml_js_exceptions.internalToOCamlException(raw_exn$2);
+ throw {
+ RE_EXN_ID: Persistence.StorageError,
+ message: "Failed to parse \"" + table.tableName + "\" loaded from storage by ids",
+ reason: exn$2,
+ Error: new Error()
+ };
+ }
+ };
  var setOrThrow$1 = function (items, table, itemSchema) {
  return setOrThrow(sql, items, table, itemSchema, pgSchema);
  };
  return {
  isInitialized: isInitialized,
  initialize: initialize,
+ loadEffectCaches: loadEffectCaches,
  loadByIdsOrThrow: loadByIdsOrThrow,
+ loadByFieldOrThrow: loadByFieldOrThrow,
  setOrThrow: setOrThrow$1
  };
  }

  var maxItemsPerQuery = 500;

- exports.makeCreateIndexSql = makeCreateIndexSql;
- exports.makeCreateTableIndicesSql = makeCreateTableIndicesSql;
- exports.makeCreateTableSql = makeCreateTableSql;
+ exports.makeCreateIndexQuery = makeCreateIndexQuery;
+ exports.makeCreateTableIndicesQuery = makeCreateTableIndicesQuery;
+ exports.makeCreateTableQuery = makeCreateTableQuery;
  exports.makeInitializeTransaction = makeInitializeTransaction;
- exports.makeLoadByIdSql = makeLoadByIdSql;
- exports.makeLoadByIdsSql = makeLoadByIdsSql;
- exports.makeInsertUnnestSetSql = makeInsertUnnestSetSql;
- exports.makeInsertValuesSetSql = makeInsertValuesSetSql;
+ exports.makeLoadByIdQuery = makeLoadByIdQuery;
+ exports.makeLoadByFieldQuery = makeLoadByFieldQuery;
+ exports.makeLoadByIdsQuery = makeLoadByIdsQuery;
+ exports.makeInsertUnnestSetQuery = makeInsertUnnestSetQuery;
+ exports.makeInsertValuesSetQuery = makeInsertValuesSetQuery;
  exports.rawEventsTableName = rawEventsTableName;
+ exports.eventSyncStateTableName = eventSyncStateTableName;
  exports.maxItemsPerQuery = maxItemsPerQuery;
  exports.makeTableBatchSetQuery = makeTableBatchSetQuery;
  exports.chunkArray = chunkArray;
+ exports.removeInvalidUtf8InPlace = removeInvalidUtf8InPlace;
+ exports.pgEncodingErrorSchema = pgEncodingErrorSchema;
+ exports.PgEncodingError = PgEncodingError;
  exports.setQueryCache = setQueryCache;
  exports.setOrThrow = setOrThrow;
+ exports.setEntityHistoryOrThrow = setEntityHistoryOrThrow;
+ exports.makeSchemaTableNamesQuery = makeSchemaTableNamesQuery;
  exports.make = make;
- /* setQueryCache Not a pure module */
+ /* pgEncodingErrorSchema Not a pure module */
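PostgreSQL's UTF8 text encoding rejects the NUL byte (`0x00`), which is exactly what `pgEncodingErrorSchema` matches and what `removeInvalidUtf8InPlace` works around by scrubbing string fields before a retry. A standalone TypeScript sketch of the same scrub (behavior mirrors the compiled JS above):

```typescript
// Strip NUL bytes from every string field, mutating the objects in place so
// a retried insert reuses the scrubbed values.
function removeInvalidUtf8InPlace(entities: Record<string, unknown>[]): void {
  for (const item of entities) {
    for (const [key, value] of Object.entries(item)) {
      if (typeof value === "string") {
        item[key] = value.replaceAll("\x00", "");
      }
    }
  }
}
```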
package/src/Utils.res CHANGED
@@ -301,6 +301,23 @@ module String = {
  str->Js.String2.slice(~from=0, ~to_=1)->Js.String.toUpperCase ++
  str->Js.String2.sliceToEnd(~from=1)
  }
+
+ /**
+ `replaceAll(str, substr, newSubstr)` returns a new `string` which is
+ identical to `str` except with all matching instances of `substr` replaced
+ by `newSubstr`. `substr` is treated as a verbatim string to match, not a
+ regular expression.
+ See [`String.replaceAll`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/replaceAll) on MDN.
+
+ ## Examples
+
+ ```rescript
+ String.replaceAll("old old string", "old", "new") == "new new string"
+ String.replaceAll("the cat and the dog", "the", "this") == "this cat and this dog"
+ ```
+ */
+ @send
+ external replaceAll: (string, string, string) => string = "replaceAll"
  }

  module Result = {
package/src/db/EntityHistory.res CHANGED
@@ -148,28 +148,6 @@ type t<'entity> = {
  insertFn: (Postgres.sql, Js.Json.t, ~shouldCopyCurrentEntity: bool) => promise<unit>,
  }

- let insertRow = (
- self: t<'entity>,
- ~sql,
- ~historyRow: historyRow<'entity>,
- ~shouldCopyCurrentEntity,
- ) => {
- let row = historyRow->S.reverseConvertToJsonOrThrow(self.schema)
- self.insertFn(sql, row, ~shouldCopyCurrentEntity)
- }
-
- let batchInsertRows = (self: t<'entity>, ~sql, ~rows: array<historyRow<'entity>>) => {
- rows
- ->Belt.Array.map(historyRow => {
- let containsRollbackDiffChange =
- historyRow.containsRollbackDiffChange->Belt.Option.getWithDefault(false)
- let shouldCopyCurrentEntity = !containsRollbackDiffChange
- self->insertRow(~sql, ~historyRow, ~shouldCopyCurrentEntity)
- })
- ->Promise.all
- ->Promise.thenResolve(_ => ())
- }
-
  type entityInternal

  external castInternal: t<'entity> => t<entityInternal> = "%identity"
package/src/db/EntityHistory.res.js CHANGED
@@ -4,7 +4,6 @@
  var Table = require("./Table.res.js");
  var Js_exn = require("rescript/lib/js/js_exn.js");
  var Belt_Array = require("rescript/lib/js/belt_Array.js");
- var Belt_Option = require("rescript/lib/js/belt_Option.js");
  var S$RescriptSchema = require("rescript-schema/src/S.res.js");

  var variants = [
@@ -158,21 +157,6 @@ function makeHistoryRowSchema(entitySchema) {
  }));
  }

- function insertRow(self, sql, historyRow, shouldCopyCurrentEntity) {
- var row = S$RescriptSchema.reverseConvertToJsonOrThrow(historyRow, self.schema);
- return self.insertFn(sql, row, shouldCopyCurrentEntity);
- }
-
- function batchInsertRows(self, sql, rows) {
- return Promise.all(Belt_Array.map(rows, (function (historyRow) {
- var containsRollbackDiffChange = Belt_Option.getWithDefault(historyRow.containsRollbackDiffChange, false);
- var shouldCopyCurrentEntity = !containsRollbackDiffChange;
- return insertRow(self, sql, historyRow, shouldCopyCurrentEntity);
- }))).then(function (param) {
-
- });
- }
-
  function fromTable(table, pgSchema, schema) {
  var currentChangeFieldNames = [
  "entity_history_block_timestamp",
@@ -300,7 +284,5 @@ exports.entityIdOnlySchema = entityIdOnlySchema;
  exports.previousHistoryFieldsSchema = previousHistoryFieldsSchema;
  exports.currentHistoryFieldsSchema = currentHistoryFieldsSchema;
  exports.makeHistoryRowSchema = makeHistoryRowSchema;
- exports.insertRow = insertRow;
- exports.batchInsertRows = batchInsertRows;
  exports.fromTable = fromTable;
  /* schema Not a pure module */
package/src/db/Table.res CHANGED
@@ -185,7 +185,7 @@ type sqlParams<'entity> = {
  hasArrayField: bool,
  }

- let toSqlParams = (table: table, ~schema) => {
+ let toSqlParams = (table: table, ~schema, ~pgSchema) => {
  let quotedFieldNames = []
  let quotedNonPrimaryFieldNames = []
  let arrayFieldTypes = []
@@ -240,7 +240,7 @@ let toSqlParams = (table: table, ~schema) => {
  switch field {
  | Field(f) =>
  switch f.fieldType {
- | Custom(fieldType) => `${(Text :> string)}[]::${(fieldType :> string)}`
+ | Custom(fieldType) => `${(Text :> string)}[]::"${pgSchema}".${(fieldType :> string)}`
  | Boolean => `${(Integer :> string)}[]::${(f.fieldType :> string)}`
  | fieldType => (fieldType :> string)
  }
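The new `~pgSchema` argument matters for `Custom` (enum) fields: their array cast in the UNNEST insert is now qualified with the indexer's schema instead of relying on the connection's `search_path`. An illustrative before/after in TypeScript (the `entity_type` enum name and `public` schema are hypothetical):

```typescript
// 2.25.0 built an unqualified cast:
const before = (fieldType: string) => `TEXT[]::${fieldType}`;
// 2.25.2 qualifies it with the pg schema:
const after = (pgSchema: string, fieldType: string) =>
  `TEXT[]::"${pgSchema}".${fieldType}`;

before("entity_type");           // TEXT[]::entity_type
after("public", "entity_type");  // TEXT[]::"public".entity_type
```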
package/src/db/Table.res.js CHANGED
@@ -183,7 +183,7 @@ function getUnfilteredCompositeIndicesUnsafe(table) {
  }));
  }

- function toSqlParams(table, schema) {
+ function toSqlParams(table, schema, pgSchema) {
  var quotedFieldNames = [];
  var quotedNonPrimaryFieldNames = [];
  var arrayFieldTypes = [];
@@ -261,7 +261,7 @@ function toSqlParams(table, schema) {
  var fieldType = f.fieldType;
  tmp = fieldType === "TIMESTAMP" || fieldType === "TIMESTAMP WITH TIME ZONE" || fieldType === "JSONB" || fieldType === "SERIAL" || fieldType === "TEXT" || fieldType === "DOUBLE PRECISION" || fieldType === "NUMERIC" || fieldType === "BOOLEAN" || fieldType === "INTEGER" || fieldType === "TIMESTAMP WITH TIME ZONE NULL" ? (
  fieldType === "BOOLEAN" ? "INTEGER[]::" + f.fieldType : fieldType
- ) : "TEXT[]::" + fieldType;
+ ) : "TEXT[]::\"" + pgSchema + "\"." + fieldType;
  } else {
  tmp = "TEXT";
  }