envio 2.22.3 → 2.24.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/evm.schema.json CHANGED
@@ -33,6 +33,13 @@
  "null"
  ]
  },
+ "output": {
+ "description": "Path where the generated directory will be placed. By default it's 'generated' relative to the current working directory. If set, it'll be a path relative to the config file location.",
+ "type": [
+ "string",
+ "null"
+ ]
+ },
  "contracts": {
  "description": "Global contract definitions that must contain all definitions except addresses. You can share a single handler/abi/event definitions for contracts across multiple chains.",
  "type": [
package/fuel.schema.json CHANGED
@@ -26,6 +26,13 @@
  "null"
  ]
  },
+ "output": {
+ "description": "Path where the generated directory will be placed. By default it's 'generated' relative to the current working directory. If set, it'll be a path relative to the config file location.",
+ "type": [
+ "string",
+ "null"
+ ]
+ },
  "contracts": {
  "description": "Global contract definitions that must contain all definitions except addresses. You can share a single handler/abi/event definitions for contracts across multiple chains.",
  "type": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "envio",
- "version": "v2.22.3",
+ "version": "v2.24.0",
  "description": "A latency and sync speed optimized, developer friendly blockchain data indexer.",
  "bin": "./bin.js",
  "main": "./index.js",
@@ -25,10 +25,10 @@
  },
  "homepage": "https://envio.dev",
  "optionalDependencies": {
- "envio-linux-x64": "v2.22.3",
- "envio-linux-arm64": "v2.22.3",
- "envio-darwin-x64": "v2.22.3",
- "envio-darwin-arm64": "v2.22.3"
+ "envio-linux-x64": "v2.24.0",
+ "envio-linux-arm64": "v2.24.0",
+ "envio-darwin-x64": "v2.24.0",
+ "envio-darwin-arm64": "v2.24.0"
  },
  "dependencies": {
  "@envio-dev/hypersync-client": "0.6.5",
package/src/Logging.res CHANGED
@@ -168,6 +168,14 @@ let logForItem = (eventItem, level: Pino.logLevel, message: string, ~params=?) =
  (eventItem->getEventLogger->Utils.magic->Js.Dict.unsafeGet((level :> string)))(params, message)
  }

+ let noopLogger: Envio.logger = {
+ info: (_message: string, ~params as _=?) => (),
+ debug: (_message: string, ~params as _=?) => (),
+ warn: (_message: string, ~params as _=?) => (),
+ error: (_message: string, ~params as _=?) => (),
+ errorWithExn: (_message: string, _exn) => (),
+ }
+
  let getUserLogger = (eventItem): Envio.logger => {
  info: (message: string, ~params=?) => eventItem->logForItem(#uinfo, message, ~params?),
  debug: (message: string, ~params=?) => eventItem->logForItem(#udebug, message, ~params?),
package/src/Logging.res.js CHANGED
@@ -197,6 +197,34 @@ function logForItem(eventItem, level, message, params) {
  return getEventLogger(eventItem)[level](params, message);
  }

+ function noopLogger_debug(_message, param) {
+
+ }
+
+ function noopLogger_info(_message, param) {
+
+ }
+
+ function noopLogger_warn(_message, param) {
+
+ }
+
+ function noopLogger_error(_message, param) {
+
+ }
+
+ function noopLogger_errorWithExn(_message, _exn) {
+
+ }
+
+ var noopLogger = {
+ debug: noopLogger_debug,
+ info: noopLogger_info,
+ warn: noopLogger_warn,
+ error: noopLogger_error,
+ errorWithExn: noopLogger_errorWithExn
+ };
+
  function getUserLogger(eventItem) {
  return {
  debug: (function (message, params) {
@@ -243,5 +271,6 @@ exports.createChild = createChild;
  exports.createChildFrom = createChildFrom;
  exports.getEventLogger = getEventLogger;
  exports.logForItem = logForItem;
+ exports.noopLogger = noopLogger;
  exports.getUserLogger = getUserLogger;
  /* logLevels Not a pure module */
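The new noopLogger compiles to an object with the same shape as Envio.logger, but every method is an empty function. A minimal sketch of how a caller might fall back to it when there is no event context to log against; the getLogger helper below is hypothetical, and the deep require path assumes the src files stay reachable in the published package:

    const Logging = require("envio/src/Logging.res.js");

    // Hypothetical helper: use the real per-event logger when an event item is
    // available, otherwise swap in the exported no-op implementation.
    function getLogger(eventItem) {
      return eventItem !== undefined
        ? Logging.getUserLogger(eventItem) // forwards to the #u* Pino levels
        : Logging.noopLogger;              // same shape, every call does nothing
    }

    getLogger(undefined).info("this message is dropped silently");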
package/src/Persistence.res CHANGED
@@ -12,14 +12,28 @@ type storage = {
  // Should initialize the storage so we can start interacting with it
  // Eg create connection, schema, tables, etc.
  initialize: (
- ~entities: array<Internal.entityConfig>,
- ~staticTables: array<Table.table>,
- ~enums: array<Internal.enumConfig<Internal.enum>>,
+ ~entities: array<Internal.entityConfig>=?,
+ ~generalTables: array<Table.table>=?,
+ ~enums: array<Internal.enumConfig<Internal.enum>>=?,
  // If true, the storage should clear existing data
- ~cleanRun: bool,
+ ~cleanRun: bool=?,
+ ) => promise<unit>,
+ @raises("StorageError")
+ loadByIdsOrThrow: 'item. (
+ ~ids: array<string>,
+ ~table: Table.table,
+ ~rowsSchema: S.t<array<'item>>,
+ ) => promise<array<'item>>,
+ @raises("StorageError")
+ setOrThrow: 'item. (
+ ~items: array<'item>,
+ ~table: Table.table,
+ ~itemSchema: S.t<'item>,
  ) => promise<unit>,
  }

+ exception StorageError({message: string, reason: exn})
+
  type storageStatus =
  | Unknown
  | Initializing(promise<unit>)
@@ -93,7 +107,7 @@ let init = async (
  } else {
  let _ = await persistence.storage.initialize(
  ~entities=persistence.allEntities,
- ~staticTables=persistence.staticTables,
+ ~generalTables=persistence.staticTables,
  ~enums=persistence.allEnums,
  ~cleanRun=reset || !skipIsInitializedCheck,
  )
@@ -109,3 +123,12 @@
  | exn => exn->ErrorHandling.mkLogAndRaise(~msg=`EE800: Failed to initialize the indexer storage.`)
  }
  }
+
+ let getInitializedStorageOrThrow = persistence => {
+ switch persistence.storageStatus {
+ | Unknown
+ | Initializing(_) =>
+ Js.Exn.raiseError(`Failed to access the indexer storage. The Persistence layer is not initialized.`)
+ | Ready(_) => persistence.storage
+ }
+ }
package/src/Persistence.res.js CHANGED
@@ -1,10 +1,14 @@
  // Generated by ReScript, PLEASE EDIT WITH CARE
  'use strict';

+ var Js_exn = require("rescript/lib/js/js_exn.js");
  var EntityHistory = require("./db/EntityHistory.res.js");
  var ErrorHandling = require("./ErrorHandling.res.js");
+ var Caml_exceptions = require("rescript/lib/js/caml_exceptions.js");
  var Caml_js_exceptions = require("rescript/lib/js/caml_js_exceptions.js");

+ var StorageError = /* @__PURE__ */Caml_exceptions.create("Persistence.StorageError");
+
  var entityHistoryActionEnumConfig_name = EntityHistory.RowAction.name;
  var entityHistoryActionEnumConfig_variants = EntityHistory.RowAction.variants;

@@ -84,7 +88,18 @@ async function init(persistence, skipIsInitializedCheckOpt, resetOpt) {
  }
  }

+ function getInitializedStorageOrThrow(persistence) {
+ var match = persistence.storageStatus;
+ if (typeof match !== "object" || match.TAG === "Initializing") {
+ return Js_exn.raiseError("Failed to access the indexer storage. The Persistence layer is not initialized.");
+ } else {
+ return persistence.storage;
+ }
+ }
+
+ exports.StorageError = StorageError;
  exports.entityHistoryActionEnumConfig = entityHistoryActionEnumConfig;
  exports.make = make;
  exports.init = init;
+ exports.getInitializedStorageOrThrow = getInitializedStorageOrThrow;
  /* EntityHistory Not a pure module */
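Persistence now exposes StorageError and getInitializedStorageOrThrow alongside the widened storage interface. A hedged sketch of how the compiled API might be consumed from plain JavaScript, where persistence, userTable and userRowsSchema are placeholders for values the indexer wires up elsewhere:

    const Persistence = require("envio/src/Persistence.res.js");

    async function loadUsersByIds(persistence, userTable, userRowsSchema, ids) {
      // Throws a plain JS error if init() has not completed yet.
      const storage = Persistence.getInitializedStorageOrThrow(persistence);
      try {
        // Labeled ReScript arguments compile to positional ones: (ids, table, rowsSchema).
        return await storage.loadByIdsOrThrow(ids, userTable, userRowsSchema);
      } catch (e) {
        // Query and parse failures are wrapped in the new StorageError exception.
        if (e.RE_EXN_ID === Persistence.StorageError) console.error(e.message, e.reason);
        throw e;
      }
    }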
package/src/PgStorage.res CHANGED
@@ -1,16 +1,16 @@
- let makeCreateIndexSqlUnsafe = (~tableName, ~indexFields, ~pgSchema) => {
+ let makeCreateIndexSql = (~tableName, ~indexFields, ~pgSchema) => {
  let indexName = tableName ++ "_" ++ indexFields->Js.Array2.joinWith("_")
  let index = indexFields->Belt.Array.map(idx => `"${idx}"`)->Js.Array2.joinWith(", ")
  `CREATE INDEX IF NOT EXISTS "${indexName}" ON "${pgSchema}"."${tableName}"(${index});`
  }

- let makeCreateTableIndicesSqlUnsafe = (table: Table.table, ~pgSchema) => {
+ let makeCreateTableIndicesSql = (table: Table.table, ~pgSchema) => {
  open Belt
  let tableName = table.tableName
  let createIndex = indexField =>
- makeCreateIndexSqlUnsafe(~tableName, ~indexFields=[indexField], ~pgSchema)
+ makeCreateIndexSql(~tableName, ~indexFields=[indexField], ~pgSchema)
  let createCompositeIndex = indexFields => {
- makeCreateIndexSqlUnsafe(~tableName, ~indexFields, ~pgSchema)
+ makeCreateIndexSql(~tableName, ~indexFields, ~pgSchema)
  }

  let singleIndices = table->Table.getSingleIndices
@@ -20,7 +20,7 @@ let makeCreateTableIndicesSqlUnsafe = (table: Table.table, ~pgSchema) => {
  compositeIndices->Array.map(createCompositeIndex)->Js.Array2.joinWith("\n")
  }

- let makeCreateTableSqlUnsafe = (table: Table.table, ~pgSchema) => {
+ let makeCreateTableSql = (table: Table.table, ~pgSchema) => {
  open Belt
  let fieldsMapped =
  table
@@ -55,12 +55,12 @@ let makeCreateTableSqlUnsafe = (table: Table.table, ~pgSchema) => {
  let makeInitializeTransaction = (
  ~pgSchema,
  ~pgUser,
- ~staticTables,
- ~entities,
- ~enums,
- ~cleanRun,
+ ~generalTables=[],
+ ~entities=[],
+ ~enums=[],
+ ~cleanRun=false,
  ) => {
- let allTables = staticTables->Array.copy
+ let allTables = generalTables->Array.copy
  let allEntityTables = []
  entities->Js.Array2.forEach((entity: Internal.entityConfig) => {
  allEntityTables->Js.Array2.push(entity.table)->ignore
@@ -106,12 +106,12 @@ END IF;`

  // Batch all table creation first (optimal for PostgreSQL)
  allTables->Js.Array2.forEach((table: Table.table) => {
- query := query.contents ++ "\n" ++ makeCreateTableSqlUnsafe(table, ~pgSchema)
+ query := query.contents ++ "\n" ++ makeCreateTableSql(table, ~pgSchema)
  })

  // Then batch all indices (better performance when tables exist)
  allTables->Js.Array2.forEach((table: Table.table) => {
- let indices = makeCreateTableIndicesSqlUnsafe(table, ~pgSchema)
+ let indices = makeCreateTableIndicesSql(table, ~pgSchema)
  if indices !== "" {
  query := query.contents ++ "\n" ++ indices
  }
@@ -131,7 +131,7 @@ END IF;`
  query :=
  query.contents ++
  "\n" ++
- makeCreateIndexSqlUnsafe(
+ makeCreateIndexSql(
  ~tableName=derivedFromField.derivedFromEntity,
  ~indexFields=[indexField],
  ~pgSchema,
@@ -143,11 +143,221 @@ END IF;`
  // Return optimized queries - main DDL in DO block, functions separate
  // Note: DO $$ BEGIN wrapper is only needed for PL/pgSQL conditionals (IF NOT EXISTS)
  // Reset case uses direct DDL (faster), non-cleanRun case uses conditionals (safer)
- cleanRun ? query.contents : `DO $$ BEGIN ${query.contents} END $$;`,
+ cleanRun || enums->Utils.Array.isEmpty
+ ? query.contents
+ : `DO $$ BEGIN ${query.contents} END $$;`,
  // Functions query (separate as they can't be in DO block)
  ]->Js.Array2.concat(functionsQuery.contents !== "" ? [functionsQuery.contents] : [])
  }

+ let makeLoadByIdSql = (~pgSchema, ~tableName) => {
+ `SELECT * FROM "${pgSchema}"."${tableName}" WHERE id = $1 LIMIT 1;`
+ }
+
+ let makeLoadByIdsSql = (~pgSchema, ~tableName) => {
+ `SELECT * FROM "${pgSchema}"."${tableName}" WHERE id = ANY($1::text[]);`
+ }
+
+ let makeInsertUnnestSetSql = (~pgSchema, ~table: Table.table, ~itemSchema, ~isRawEvents) => {
+ let {quotedFieldNames, quotedNonPrimaryFieldNames, arrayFieldTypes} =
+ table->Table.toSqlParams(~schema=itemSchema)
+
+ let primaryKeyFieldNames = Table.getPrimaryKeyFieldNames(table)
+
+ `INSERT INTO "${pgSchema}"."${table.tableName}" (${quotedFieldNames->Js.Array2.joinWith(", ")})
+ SELECT * FROM unnest(${arrayFieldTypes
+ ->Js.Array2.mapi((arrayFieldType, idx) => {
+ `$${(idx + 1)->Js.Int.toString}::${arrayFieldType}`
+ })
+ ->Js.Array2.joinWith(",")})` ++
+ switch (isRawEvents, primaryKeyFieldNames) {
+ | (true, _)
+ | (_, []) => ``
+ | (false, primaryKeyFieldNames) =>
+ `ON CONFLICT(${primaryKeyFieldNames
+ ->Js.Array2.map(fieldName => `"${fieldName}"`)
+ ->Js.Array2.joinWith(",")}) DO ` ++ (
+ quotedNonPrimaryFieldNames->Utils.Array.isEmpty
+ ? `NOTHING`
+ : `UPDATE SET ${quotedNonPrimaryFieldNames
+ ->Js.Array2.map(fieldName => {
+ `${fieldName} = EXCLUDED.${fieldName}`
+ })
+ ->Js.Array2.joinWith(",")}`
+ )
+ } ++ ";"
+ }
+
+ let makeInsertValuesSetSql = (~pgSchema, ~table: Table.table, ~itemSchema, ~itemsCount) => {
+ let {quotedFieldNames, quotedNonPrimaryFieldNames} = table->Table.toSqlParams(~schema=itemSchema)
+
+ let primaryKeyFieldNames = Table.getPrimaryKeyFieldNames(table)
+ let fieldsCount = quotedFieldNames->Array.length
+
+ // Create placeholder variables for the VALUES clause - using $1, $2, etc.
+ let placeholders = ref("")
+ for idx in 1 to itemsCount {
+ if idx > 1 {
+ placeholders := placeholders.contents ++ ","
+ }
+ placeholders := placeholders.contents ++ "("
+ for fieldIdx in 0 to fieldsCount - 1 {
+ if fieldIdx > 0 {
+ placeholders := placeholders.contents ++ ","
+ }
+ placeholders := placeholders.contents ++ `$${(fieldIdx * itemsCount + idx)->Js.Int.toString}`
+ }
+ placeholders := placeholders.contents ++ ")"
+ }
+
+ `INSERT INTO "${pgSchema}"."${table.tableName}" (${quotedFieldNames->Js.Array2.joinWith(", ")})
+ VALUES${placeholders.contents}` ++
+ switch primaryKeyFieldNames {
+ | [] => ``
+ | primaryKeyFieldNames =>
+ `ON CONFLICT(${primaryKeyFieldNames
+ ->Js.Array2.map(fieldName => `"${fieldName}"`)
+ ->Js.Array2.joinWith(",")}) DO ` ++ (
+ quotedNonPrimaryFieldNames->Utils.Array.isEmpty
+ ? `NOTHING`
+ : `UPDATE SET ${quotedNonPrimaryFieldNames
+ ->Js.Array2.map(fieldName => {
+ `${fieldName} = EXCLUDED.${fieldName}`
+ })
+ ->Js.Array2.joinWith(",")}`
+ )
+ } ++ ";"
+ }
+
+ // Should move this to a better place
+ // We need it for the isRawEvents check in makeTableBatchSet
+ // to always apply the unnest optimization.
+ // This is needed, because even though it has JSON fields,
+ // they are always guaranteed to be an object.
+ // FIXME what about Fuel params?
+ let rawEventsTableName = "raw_events"
+
+ // Constants for chunking
+ let maxItemsPerQuery = 500
+
+ let makeTableBatchSetQuery = (~pgSchema, ~table: Table.table, ~itemSchema: S.t<'item>) => {
+ let {dbSchema, hasArrayField} = table->Table.toSqlParams(~schema=itemSchema)
+ let isRawEvents = table.tableName === rawEventsTableName
+
+ // Should experiment how much it'll affect performance
+ // Although, it should be fine not to perform the validation check,
+ // since the values are validated by type system.
+ // As an alternative, we can only run Sury validation only when
+ // db write fails to show a better user error.
+ let typeValidation = false
+
+ if isRawEvents || !hasArrayField {
+ {
+ "sql": makeInsertUnnestSetSql(~pgSchema, ~table, ~itemSchema, ~isRawEvents),
+ "convertOrThrow": S.compile(
+ S.unnest(dbSchema),
+ ~input=Value,
+ ~output=Unknown,
+ ~mode=Sync,
+ ~typeValidation,
+ ),
+ "isInsertValues": false,
+ }
+ } else {
+ {
+ "sql": makeInsertValuesSetSql(~pgSchema, ~table, ~itemSchema, ~itemsCount=maxItemsPerQuery),
+ "convertOrThrow": S.compile(
+ S.unnest(itemSchema)->S.preprocess(_ => {
+ serializer: Utils.Array.flatten->Utils.magic,
+ }),
+ ~input=Value,
+ ~output=Unknown,
+ ~mode=Sync,
+ ~typeValidation,
+ ),
+ "isInsertValues": true,
+ }
+ }
+ }
+
+ let chunkArray = (arr: array<'a>, ~chunkSize) => {
+ let chunks = []
+ let i = ref(0)
+ while i.contents < arr->Array.length {
+ let chunk = arr->Js.Array2.slice(~start=i.contents, ~end_=i.contents + chunkSize)
+ chunks->Js.Array2.push(chunk)->ignore
+ i := i.contents + chunkSize
+ }
+ chunks
+ }
+
+ // WeakMap for caching table batch set queries
+ let setQueryCache = Utils.WeakMap.make()
+ let setOrThrow = async (sql, ~items, ~table: Table.table, ~itemSchema, ~pgSchema) => {
+ if items->Array.length === 0 {
+ ()
+ } else {
+ // Get or create cached query for this table
+ let query = switch setQueryCache->Utils.WeakMap.get(table) {
+ | Some(cached) => cached
+ | None => {
+ let newQuery = makeTableBatchSetQuery(
+ ~pgSchema,
+ ~table,
+ ~itemSchema=itemSchema->S.toUnknown,
+ )
+ setQueryCache->Utils.WeakMap.set(table, newQuery)->ignore
+ newQuery
+ }
+ }
+
+ let sqlQuery = query["sql"]
+
+ try {
+ if query["isInsertValues"] {
+ let chunks = chunkArray(items, ~chunkSize=maxItemsPerQuery)
+ let responses = []
+ chunks->Js.Array2.forEach(chunk => {
+ let chunkSize = chunk->Array.length
+ let isFullChunk = chunkSize === maxItemsPerQuery
+
+ let response = sql->Postgres.preparedUnsafe(
+ // Either use the sql query for full chunks from cache
+ // or create a new one for partial chunks on the fly.
+ isFullChunk
+ ? sqlQuery
+ : makeInsertValuesSetSql(~pgSchema, ~table, ~itemSchema, ~itemsCount=chunkSize),
+ query["convertOrThrow"](chunk->(Utils.magic: array<'item> => array<unknown>)),
+ )
+ responses->Js.Array2.push(response)->ignore
+ })
+ let _ = await Promise.all(responses)
+ } else {
+ // Use UNNEST approach for single query
+ await sql->Postgres.preparedUnsafe(
+ sqlQuery,
+ query["convertOrThrow"](items->(Utils.magic: array<'item> => array<unknown>)),
+ )
+ }
+ } catch {
+ | S.Raised(_) as exn =>
+ raise(
+ Persistence.StorageError({
+ message: `Failed to convert items for table "${table.tableName}"`,
+ reason: exn,
+ }),
+ )
+ | exn =>
+ raise(
+ Persistence.StorageError({
+ message: `Failed to insert items into table "${table.tableName}"`,
+ reason: exn,
+ }),
+ )
+ }
+ }
+ }
+
  let make = (~sql: Postgres.sql, ~pgSchema, ~pgUser): Persistence.storage => {
  let isInitialized = async () => {
  let schemas =
@@ -157,11 +367,11 @@ let make = (~sql: Postgres.sql, ~pgSchema, ~pgUser): Persistence.storage => {
  schemas->Utils.Array.notEmpty
  }

- let initialize = async (~entities, ~staticTables, ~enums, ~cleanRun) => {
+ let initialize = async (~entities=[], ~generalTables=[], ~enums=[], ~cleanRun=false) => {
  let queries = makeInitializeTransaction(
  ~pgSchema,
  ~pgUser,
- ~staticTables,
+ ~generalTables,
  ~entities,
  ~enums,
  ~cleanRun,
@@ -172,8 +382,60 @@ let make = (~sql: Postgres.sql, ~pgSchema, ~pgUser): Persistence.storage => {
  })
  }

+ let loadByIdsOrThrow = async (~ids, ~table: Table.table, ~rowsSchema) => {
+ switch await (
+ switch ids {
+ | [_] =>
+ sql->Postgres.preparedUnsafe(
+ makeLoadByIdSql(~pgSchema, ~tableName=table.tableName),
+ ids->Obj.magic,
+ )
+ | _ =>
+ sql->Postgres.preparedUnsafe(
+ makeLoadByIdsSql(~pgSchema, ~tableName=table.tableName),
+ [ids]->Obj.magic,
+ )
+ }
+ ) {
+ | exception exn =>
+ raise(
+ Persistence.StorageError({
+ message: `Failed loading "${table.tableName}" from storage by ids`,
+ reason: exn,
+ }),
+ )
+ | rows =>
+ try rows->S.parseOrThrow(rowsSchema) catch {
+ | exn =>
+ raise(
+ Persistence.StorageError({
+ message: `Failed to parse "${table.tableName}" loaded from storage by ids`,
+ reason: exn,
+ }),
+ )
+ }
+ }
+ }
+
+ let setOrThrow = (
+ type item,
+ ~items: array<item>,
+ ~table: Table.table,
+ ~itemSchema: S.t<item>,
+ ) => {
+ setOrThrow(
+ sql,
+ ~items=items->(Utils.magic: array<item> => array<unknown>),
+ ~table,
+ ~itemSchema=itemSchema->S.toUnknown,
+ ~pgSchema,
+ )
+ }
+
  {
  isInitialized,
  initialize,
+ loadByIdsOrThrow,
+ setOrThrow,
  }
  }
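The VALUES fallback above numbers its placeholders column-major: for 0-based field index f and 1-based row index r the placeholder is $(f * itemsCount + r), so the per-column values produced by the unnest serializer can be bound as one flattened array. A small JavaScript sketch of the same loop, for 2 fields and 3 rows:

    // Mirrors the placeholder loop in makeInsertValuesSetSql.
    function placeholders(fieldsCount, itemsCount) {
      let out = "";
      for (let idx = 1; idx <= itemsCount; ++idx) {
        if (idx > 1) out += ",";
        out += "(";
        for (let fieldIdx = 0; fieldIdx < fieldsCount; ++fieldIdx) {
          if (fieldIdx > 0) out += ",";
          out += "$" + (fieldIdx * itemsCount + idx);
        }
        out += ")";
      }
      return out;
    }

    console.log(placeholders(2, 3)); // ($1,$4),($2,$5),($3,$6)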
package/src/PgStorage.res.js CHANGED
@@ -6,8 +6,12 @@ var Table = require("./db/Table.res.js");
  var Utils = require("./Utils.res.js");
  var Schema = require("./db/Schema.res.js");
  var Belt_Array = require("rescript/lib/js/belt_Array.js");
+ var Caml_option = require("rescript/lib/js/caml_option.js");
+ var Persistence = require("./Persistence.res.js");
+ var S$RescriptSchema = require("rescript-schema/src/S.res.js");
+ var Caml_js_exceptions = require("rescript/lib/js/caml_js_exceptions.js");

- function makeCreateIndexSqlUnsafe(tableName, indexFields, pgSchema) {
+ function makeCreateIndexSql(tableName, indexFields, pgSchema) {
  var indexName = tableName + "_" + indexFields.join("_");
  var index = Belt_Array.map(indexFields, (function (idx) {
  return "\"" + idx + "\"";
@@ -15,20 +19,20 @@ function makeCreateIndexSqlUnsafe(tableName, indexFields, pgSchema) {
  return "CREATE INDEX IF NOT EXISTS \"" + indexName + "\" ON \"" + pgSchema + "\".\"" + tableName + "\"(" + index + ");";
  }

- function makeCreateTableIndicesSqlUnsafe(table, pgSchema) {
+ function makeCreateTableIndicesSql(table, pgSchema) {
  var tableName = table.tableName;
  var createIndex = function (indexField) {
- return makeCreateIndexSqlUnsafe(tableName, [indexField], pgSchema);
+ return makeCreateIndexSql(tableName, [indexField], pgSchema);
  };
  var createCompositeIndex = function (indexFields) {
- return makeCreateIndexSqlUnsafe(tableName, indexFields, pgSchema);
+ return makeCreateIndexSql(tableName, indexFields, pgSchema);
  };
  var singleIndices = Table.getSingleIndices(table);
  var compositeIndices = Table.getCompositeIndices(table);
  return Belt_Array.map(singleIndices, createIndex).join("\n") + Belt_Array.map(compositeIndices, createCompositeIndex).join("\n");
  }

- function makeCreateTableSqlUnsafe(table, pgSchema) {
+ function makeCreateTableSql(table, pgSchema) {
  var fieldsMapped = Belt_Array.map(Table.getFields(table), (function (field) {
  var defaultValue = field.defaultValue;
  var fieldType = field.fieldType;
@@ -52,8 +56,12 @@ function makeCreateTableSqlUnsafe(table, pgSchema) {
  ) + ");";
  }

- function makeInitializeTransaction(pgSchema, pgUser, staticTables, entities, enums, cleanRun) {
- var allTables = $$Array.copy(staticTables);
+ function makeInitializeTransaction(pgSchema, pgUser, generalTablesOpt, entitiesOpt, enumsOpt, cleanRunOpt) {
+ var generalTables = generalTablesOpt !== undefined ? generalTablesOpt : [];
+ var entities = entitiesOpt !== undefined ? entitiesOpt : [];
+ var enums = enumsOpt !== undefined ? enumsOpt : [];
+ var cleanRun = cleanRunOpt !== undefined ? cleanRunOpt : false;
+ var allTables = $$Array.copy(generalTables);
  var allEntityTables = [];
  entities.forEach(function (entity) {
  allEntityTables.push(entity.table);
@@ -75,10 +83,10 @@ function makeInitializeTransaction(pgSchema, pgUser, staticTables, entities, enu
  );
  });
  allTables.forEach(function (table) {
- query.contents = query.contents + "\n" + makeCreateTableSqlUnsafe(table, pgSchema);
+ query.contents = query.contents + "\n" + makeCreateTableSql(table, pgSchema);
  });
  allTables.forEach(function (table) {
- var indices = makeCreateTableIndicesSqlUnsafe(table, pgSchema);
+ var indices = makeCreateTableIndicesSql(table, pgSchema);
  if (indices !== "") {
  query.contents = query.contents + "\n" + indices;
  return ;
@@ -92,10 +100,153 @@ function makeInitializeTransaction(pgSchema, pgUser, staticTables, entities, enu
  functionsQuery.contents = functionsQuery.contents + "\n" + entity.entityHistory.createInsertFnQuery;
  Table.getDerivedFromFields(entity.table).forEach(function (derivedFromField) {
  var indexField = Utils.unwrapResultExn(Schema.getDerivedFromFieldName(derivedSchema, derivedFromField));
- query.contents = query.contents + "\n" + makeCreateIndexSqlUnsafe(derivedFromField.derivedFromEntity, [indexField], pgSchema);
+ query.contents = query.contents + "\n" + makeCreateIndexSql(derivedFromField.derivedFromEntity, [indexField], pgSchema);
  });
  });
- return [cleanRun ? query.contents : "DO $$ BEGIN " + query.contents + " END $$;"].concat(functionsQuery.contents !== "" ? [functionsQuery.contents] : []);
+ return [cleanRun || Utils.$$Array.isEmpty(enums) ? query.contents : "DO $$ BEGIN " + query.contents + " END $$;"].concat(functionsQuery.contents !== "" ? [functionsQuery.contents] : []);
+ }
+
+ function makeLoadByIdSql(pgSchema, tableName) {
+ return "SELECT * FROM \"" + pgSchema + "\".\"" + tableName + "\" WHERE id = $1 LIMIT 1;";
+ }
+
+ function makeLoadByIdsSql(pgSchema, tableName) {
+ return "SELECT * FROM \"" + pgSchema + "\".\"" + tableName + "\" WHERE id = ANY($1::text[]);";
+ }
+
+ function makeInsertUnnestSetSql(pgSchema, table, itemSchema, isRawEvents) {
+ var match = Table.toSqlParams(table, itemSchema);
+ var quotedNonPrimaryFieldNames = match.quotedNonPrimaryFieldNames;
+ var primaryKeyFieldNames = Table.getPrimaryKeyFieldNames(table);
+ return "INSERT INTO \"" + pgSchema + "\".\"" + table.tableName + "\" (" + match.quotedFieldNames.join(", ") + ")\nSELECT * FROM unnest(" + match.arrayFieldTypes.map(function (arrayFieldType, idx) {
+ return "$" + (idx + 1 | 0).toString() + "::" + arrayFieldType;
+ }).join(",") + ")" + (
+ isRawEvents || primaryKeyFieldNames.length === 0 ? "" : "ON CONFLICT(" + primaryKeyFieldNames.map(function (fieldName) {
+ return "\"" + fieldName + "\"";
+ }).join(",") + ") DO " + (
+ Utils.$$Array.isEmpty(quotedNonPrimaryFieldNames) ? "NOTHING" : "UPDATE SET " + quotedNonPrimaryFieldNames.map(function (fieldName) {
+ return fieldName + " = EXCLUDED." + fieldName;
+ }).join(",")
+ )
+ ) + ";";
+ }
+
+ function makeInsertValuesSetSql(pgSchema, table, itemSchema, itemsCount) {
+ var match = Table.toSqlParams(table, itemSchema);
+ var quotedNonPrimaryFieldNames = match.quotedNonPrimaryFieldNames;
+ var quotedFieldNames = match.quotedFieldNames;
+ var primaryKeyFieldNames = Table.getPrimaryKeyFieldNames(table);
+ var fieldsCount = quotedFieldNames.length;
+ var placeholders = "";
+ for(var idx = 1; idx <= itemsCount; ++idx){
+ if (idx > 1) {
+ placeholders = placeholders + ",";
+ }
+ placeholders = placeholders + "(";
+ for(var fieldIdx = 0; fieldIdx < fieldsCount; ++fieldIdx){
+ if (fieldIdx > 0) {
+ placeholders = placeholders + ",";
+ }
+ placeholders = placeholders + ("$" + (Math.imul(fieldIdx, itemsCount) + idx | 0).toString());
+ }
+ placeholders = placeholders + ")";
+ }
+ return "INSERT INTO \"" + pgSchema + "\".\"" + table.tableName + "\" (" + quotedFieldNames.join(", ") + ")\nVALUES" + placeholders + (
+ primaryKeyFieldNames.length !== 0 ? "ON CONFLICT(" + primaryKeyFieldNames.map(function (fieldName) {
+ return "\"" + fieldName + "\"";
+ }).join(",") + ") DO " + (
+ Utils.$$Array.isEmpty(quotedNonPrimaryFieldNames) ? "NOTHING" : "UPDATE SET " + quotedNonPrimaryFieldNames.map(function (fieldName) {
+ return fieldName + " = EXCLUDED." + fieldName;
+ }).join(",")
+ ) : ""
+ ) + ";";
+ }
+
+ var rawEventsTableName = "raw_events";
+
+ function makeTableBatchSetQuery(pgSchema, table, itemSchema) {
+ var match = Table.toSqlParams(table, itemSchema);
+ var isRawEvents = table.tableName === rawEventsTableName;
+ if (isRawEvents || !match.hasArrayField) {
+ return {
+ sql: makeInsertUnnestSetSql(pgSchema, table, itemSchema, isRawEvents),
+ convertOrThrow: S$RescriptSchema.compile(S$RescriptSchema.unnest(match.dbSchema), "Output", "Input", "Sync", false),
+ isInsertValues: false
+ };
+ } else {
+ return {
+ sql: makeInsertValuesSetSql(pgSchema, table, itemSchema, 500),
+ convertOrThrow: S$RescriptSchema.compile(S$RescriptSchema.preprocess(S$RescriptSchema.unnest(itemSchema), (function (param) {
+ return {
+ s: (function (prim) {
+ return prim.flat(1);
+ })
+ };
+ })), "Output", "Input", "Sync", false),
+ isInsertValues: true
+ };
+ }
+ }
+
+ function chunkArray(arr, chunkSize) {
+ var chunks = [];
+ var i = 0;
+ while(i < arr.length) {
+ var chunk = arr.slice(i, i + chunkSize | 0);
+ chunks.push(chunk);
+ i = i + chunkSize | 0;
+ };
+ return chunks;
+ }
+
+ var setQueryCache = new WeakMap();
+
+ async function setOrThrow(sql, items, table, itemSchema, pgSchema) {
+ if (items.length === 0) {
+ return ;
+ }
+ var cached = setQueryCache.get(table);
+ var query;
+ if (cached !== undefined) {
+ query = Caml_option.valFromOption(cached);
+ } else {
+ var newQuery = makeTableBatchSetQuery(pgSchema, table, itemSchema);
+ setQueryCache.set(table, newQuery);
+ query = newQuery;
+ }
+ var sqlQuery = query.sql;
+ try {
+ if (!query.isInsertValues) {
+ return await sql.unsafe(sqlQuery, query.convertOrThrow(items), {prepare: true});
+ }
+ var chunks = chunkArray(items, 500);
+ var responses = [];
+ chunks.forEach(function (chunk) {
+ var chunkSize = chunk.length;
+ var isFullChunk = chunkSize === 500;
+ var response = sql.unsafe(isFullChunk ? sqlQuery : makeInsertValuesSetSql(pgSchema, table, itemSchema, chunkSize), query.convertOrThrow(chunk), {prepare: true});
+ responses.push(response);
+ });
+ await Promise.all(responses);
+ return ;
+ }
+ catch (raw_exn){
+ var exn = Caml_js_exceptions.internalToOCamlException(raw_exn);
+ if (exn.RE_EXN_ID === S$RescriptSchema.Raised) {
+ throw {
+ RE_EXN_ID: Persistence.StorageError,
+ message: "Failed to convert items for table \"" + table.tableName + "\"",
+ reason: exn,
+ Error: new Error()
+ };
+ }
+ throw {
+ RE_EXN_ID: Persistence.StorageError,
+ message: "Failed to insert items into table \"" + table.tableName + "\"",
+ reason: exn,
+ Error: new Error()
+ };
+ }
  }

  function make(sql, pgSchema, pgUser) {
@@ -103,23 +254,73 @@ function make(sql, pgSchema, pgUser) {
  var schemas = await sql.unsafe("SELECT schema_name FROM information_schema.schemata WHERE schema_name = '" + pgSchema + "';");
  return Utils.$$Array.notEmpty(schemas);
  };
- var initialize = async function (entities, staticTables, enums, cleanRun) {
- var queries = makeInitializeTransaction(pgSchema, pgUser, staticTables, entities, enums, cleanRun);
+ var initialize = async function (entitiesOpt, generalTablesOpt, enumsOpt, cleanRunOpt) {
+ var entities = entitiesOpt !== undefined ? entitiesOpt : [];
+ var generalTables = generalTablesOpt !== undefined ? generalTablesOpt : [];
+ var enums = enumsOpt !== undefined ? enumsOpt : [];
+ var cleanRun = cleanRunOpt !== undefined ? cleanRunOpt : false;
+ var queries = makeInitializeTransaction(pgSchema, pgUser, generalTables, entities, enums, cleanRun);
  await sql.begin(function (sql) {
  return queries.map(function (query) {
  return sql.unsafe(query);
  });
  });
  };
+ var loadByIdsOrThrow = async function (ids, table, rowsSchema) {
+ var rows;
+ try {
+ rows = await (
+ ids.length !== 1 ? sql.unsafe(makeLoadByIdsSql(pgSchema, table.tableName), [ids], {prepare: true}) : sql.unsafe(makeLoadByIdSql(pgSchema, table.tableName), ids, {prepare: true})
+ );
+ }
+ catch (raw_exn){
+ var exn = Caml_js_exceptions.internalToOCamlException(raw_exn);
+ throw {
+ RE_EXN_ID: Persistence.StorageError,
+ message: "Failed loading \"" + table.tableName + "\" from storage by ids",
+ reason: exn,
+ Error: new Error()
+ };
+ }
+ try {
+ return S$RescriptSchema.parseOrThrow(rows, rowsSchema);
+ }
+ catch (raw_exn$1){
+ var exn$1 = Caml_js_exceptions.internalToOCamlException(raw_exn$1);
+ throw {
+ RE_EXN_ID: Persistence.StorageError,
+ message: "Failed to parse \"" + table.tableName + "\" loaded from storage by ids",
+ reason: exn$1,
+ Error: new Error()
+ };
+ }
+ };
+ var setOrThrow$1 = function (items, table, itemSchema) {
+ return setOrThrow(sql, items, table, itemSchema, pgSchema);
+ };
  return {
  isInitialized: isInitialized,
- initialize: initialize
+ initialize: initialize,
+ loadByIdsOrThrow: loadByIdsOrThrow,
+ setOrThrow: setOrThrow$1
  };
  }

- exports.makeCreateIndexSqlUnsafe = makeCreateIndexSqlUnsafe;
- exports.makeCreateTableIndicesSqlUnsafe = makeCreateTableIndicesSqlUnsafe;
- exports.makeCreateTableSqlUnsafe = makeCreateTableSqlUnsafe;
+ var maxItemsPerQuery = 500;
+
+ exports.makeCreateIndexSql = makeCreateIndexSql;
+ exports.makeCreateTableIndicesSql = makeCreateTableIndicesSql;
+ exports.makeCreateTableSql = makeCreateTableSql;
  exports.makeInitializeTransaction = makeInitializeTransaction;
+ exports.makeLoadByIdSql = makeLoadByIdSql;
+ exports.makeLoadByIdsSql = makeLoadByIdsSql;
+ exports.makeInsertUnnestSetSql = makeInsertUnnestSetSql;
+ exports.makeInsertValuesSetSql = makeInsertValuesSetSql;
+ exports.rawEventsTableName = rawEventsTableName;
+ exports.maxItemsPerQuery = maxItemsPerQuery;
+ exports.makeTableBatchSetQuery = makeTableBatchSetQuery;
+ exports.chunkArray = chunkArray;
+ exports.setQueryCache = setQueryCache;
+ exports.setOrThrow = setOrThrow;
  exports.make = make;
- /* Table Not a pure module */
+ /* setQueryCache Not a pure module */
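chunkArray keeps the VALUES code path under the maxItemsPerQuery cap; only the final, partial chunk makes setOrThrow generate a fresh statement instead of reusing the cached one. An illustrative call against the compiled module (the row contents are arbitrary, and the deep require path assumes the src files stay reachable in the published package):

    const PgStorage = require("envio/src/PgStorage.res.js");

    // 1,100 rows split at the exported cap of 500: two full chunks plus one of 100.
    const rows = Array.from({ length: 1100 }, (_, i) => ({ id: String(i) }));
    const chunks = PgStorage.chunkArray(rows, PgStorage.maxItemsPerQuery);
    console.log(chunks.map((c) => c.length)); // [ 500, 500, 100 ]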
package/src/db/EntityHistory.res CHANGED
@@ -173,8 +173,9 @@ let batchInsertRows = (self: t<'entity>, ~sql, ~rows: array<historyRow<'entity>>
  type entityInternal

  external castInternal: t<'entity> => t<entityInternal> = "%identity"
+ external eval: string => 'a = "eval"

- let fromTable = (table: table, ~schema: S.t<'entity>): t<'entity> => {
+ let fromTable = (table: table, ~pgSchema, ~schema: S.t<'entity>): t<'entity> => {
  let entity_history_block_timestamp = "entity_history_block_timestamp"
  let entity_history_chain_id = "entity_history_chain_id"
  let entity_history_block_number = "entity_history_block_number"
@@ -235,12 +236,10 @@ let fromTable = (table: table, ~schema: S.t<'entity>): t<'entity> => {
  let dataFieldNames = dataFields->Belt.Array.map(field => field->getFieldName)

  let originTableName = table.tableName
- let originSchemaName = table.schemaName
  let historyTableName = originTableName ++ "_history"
  //ignore composite indices
  let table = mkTable(
  historyTableName,
- ~schemaName=originSchemaName,
  ~fields=Belt.Array.concatMany([
  currentHistoryFields,
  previousHistoryFields,
@@ -251,8 +250,8 @@ let fromTable = (table: table, ~schema: S.t<'entity>): t<'entity> => {

  let insertFnName = `"insert_${table.tableName}"`
  let historyRowArg = "history_row"
- let historyTablePath = `"${originSchemaName}"."${historyTableName}"`
- let originTablePath = `"${originSchemaName}"."${originTableName}"`
+ let historyTablePath = `"${pgSchema}"."${historyTableName}"`
+ let originTablePath = `"${pgSchema}"."${originTableName}"`

  let previousHistoryFieldsAreNullStr =
  previousChangeFieldNames
@@ -335,7 +334,7 @@ let fromTable = (table: table, ~schema: S.t<'entity>): t<'entity> => {
  \${shouldCopyCurrentEntity});\``

  let insertFn: (Postgres.sql, Js.Json.t, ~shouldCopyCurrentEntity: bool) => promise<unit> =
- insertFnString->Table.PostgresInterop.eval
+ insertFnString->eval

  let schema = makeHistoryRowSchema(schema)

package/src/db/EntityHistory.res.js CHANGED
@@ -173,7 +173,7 @@ function batchInsertRows(self, sql, rows) {
  });
  }

- function fromTable(table, schema) {
+ function fromTable(table, pgSchema, schema) {
  var currentChangeFieldNames = [
  "entity_history_block_timestamp",
  "entity_history_chain_id",
@@ -238,9 +238,8 @@ function fromTable(table, schema) {
  return Table.getFieldName(field);
  }));
  var originTableName = table.tableName;
- var originSchemaName = table.schemaName;
  var historyTableName = originTableName + "_history";
- var table$1 = Table.mkTable(historyTableName, originSchemaName, undefined, Belt_Array.concatMany([
+ var table$1 = Table.mkTable(historyTableName, undefined, Belt_Array.concatMany([
  currentHistoryFields,
  previousHistoryFields,
  dataFields,
@@ -251,8 +250,8 @@ function fromTable(table, schema) {
  ]));
  var insertFnName = "\"insert_" + table$1.tableName + "\"";
  var historyRowArg = "history_row";
- var historyTablePath = "\"" + originSchemaName + "\".\"" + historyTableName + "\"";
- var originTablePath = "\"" + originSchemaName + "\".\"" + originTableName + "\"";
+ var historyTablePath = "\"" + pgSchema + "\".\"" + historyTableName + "\"";
+ var originTablePath = "\"" + pgSchema + "\".\"" + originTableName + "\"";

  var previousHistoryFieldsAreNullStr = Belt_Array.map(previousChangeFieldNames, (function (fieldName) {
  return historyRowArg + "." + fieldName + " IS NULL";
package/src/db/Table.res CHANGED
@@ -89,14 +89,12 @@ let getFieldType = (field: field) => {

  type table = {
  tableName: string,
- schemaName: string,
  fields: array<fieldOrDerived>,
  compositeIndices: array<array<string>>,
  }

- let mkTable = (tableName, ~schemaName, ~compositeIndices=[], ~fields) => {
+ let mkTable = (tableName, ~compositeIndices=[], ~fields) => {
  tableName,
- schemaName,
  fields,
  compositeIndices,
  }
@@ -299,59 +297,3 @@ let getCompositeIndices = (table): array<array<string>> => {
  ->getUnfilteredCompositeIndicesUnsafe
  ->Array.keep(ind => ind->Array.length > 1)
  }
-
- module PostgresInterop = {
- type pgFn<'payload, 'return> = (Postgres.sql, 'payload) => promise<'return>
- type batchSetFn<'a> = (Postgres.sql, array<'a>) => promise<unit>
- external eval: string => 'a = "eval"
-
- let makeBatchSetFnString = (table: table) => {
- let fieldNamesInQuotes =
- table->getNonDefaultFieldNames->Array.map(fieldName => `"${fieldName}"`)
- `(sql, rows) => {
- return sql\`
- INSERT INTO "${table.schemaName}"."${table.tableName}"
- \${sql(rows, ${fieldNamesInQuotes->Js.Array2.joinWith(", ")})}
- ON CONFLICT(${table->getPrimaryKeyFieldNames->Js.Array2.joinWith(", ")}) DO UPDATE
- SET
- ${fieldNamesInQuotes
- ->Array.map(fieldNameInQuotes => `${fieldNameInQuotes} = EXCLUDED.${fieldNameInQuotes}`)
- ->Js.Array2.joinWith(", ")};\`
- }`
- }
-
- let chunkBatchQuery = (
- sql,
- entityDataArray: array<'entity>,
- queryToExecute: pgFn<array<'entity>, 'return>,
- ~maxItemsPerQuery=500,
- ): promise<array<'return>> => {
- let responses = []
- let i = ref(0)
- let shouldContinue = () => i.contents < entityDataArray->Array.length
- // Split entityDataArray into chunks of maxItemsPerQuery
- while shouldContinue() {
- let chunk =
- entityDataArray->Js.Array2.slice(~start=i.contents, ~end_=i.contents + maxItemsPerQuery)
- let response = queryToExecute(sql, chunk)
- responses->Js.Array2.push(response)->ignore
- i := i.contents + maxItemsPerQuery
- }
- Promise.all(responses)
- }
-
- let makeBatchSetFn = (~table, ~schema: S.t<'a>): batchSetFn<'a> => {
- let batchSetFn: pgFn<array<Js.Json.t>, unit> = table->makeBatchSetFnString->eval
- let parseOrThrow = S.compile(
- S.array(schema),
- ~input=Value,
- ~output=Json,
- ~mode=Sync,
- ~typeValidation=true,
- )
- async (sql, rows) => {
- let rowsJson = rows->parseOrThrow->(Utils.magic: Js.Json.t => array<Js.Json.t>)
- let _res = await chunkBatchQuery(sql, rowsJson, batchSetFn)
- }
- }
- }
package/src/db/Table.res.js CHANGED
@@ -72,11 +72,10 @@ function getFieldType(field) {
  );
  }

- function mkTable(tableName, schemaName, compositeIndicesOpt, fields) {
+ function mkTable(tableName, compositeIndicesOpt, fields) {
  var compositeIndices = compositeIndicesOpt !== undefined ? compositeIndicesOpt : [];
  return {
  tableName: tableName,
- schemaName: schemaName,
  fields: fields,
  compositeIndices: compositeIndices
  };
@@ -302,43 +301,6 @@ function getCompositeIndices(table) {
  }));
  }

- function makeBatchSetFnString(table) {
- var fieldNamesInQuotes = Belt_Array.map(getNonDefaultFieldNames(table), (function (fieldName) {
- return "\"" + fieldName + "\"";
- }));
- return "(sql, rows) => {\n return sql\`\n INSERT INTO \"" + table.schemaName + "\".\"" + table.tableName + "\"\n \${sql(rows, " + fieldNamesInQuotes.join(", ") + ")}\n ON CONFLICT(" + getPrimaryKeyFieldNames(table).join(", ") + ") DO UPDATE\n SET\n " + Belt_Array.map(fieldNamesInQuotes, (function (fieldNameInQuotes) {
- return fieldNameInQuotes + " = EXCLUDED." + fieldNameInQuotes;
- })).join(", ") + ";\`\n }";
- }
-
- function chunkBatchQuery(sql, entityDataArray, queryToExecute, maxItemsPerQueryOpt) {
- var maxItemsPerQuery = maxItemsPerQueryOpt !== undefined ? maxItemsPerQueryOpt : 500;
- var responses = [];
- var i = 0;
- while(i < entityDataArray.length) {
- var chunk = entityDataArray.slice(i, i + maxItemsPerQuery | 0);
- var response = queryToExecute(sql, chunk);
- responses.push(response);
- i = i + maxItemsPerQuery | 0;
- };
- return Promise.all(responses);
- }
-
- function makeBatchSetFn(table, schema) {
- var batchSetFn = eval(makeBatchSetFnString(table));
- var parseOrThrow = S$RescriptSchema.compile(S$RescriptSchema.array(schema), "Output", "Json", "Sync", true);
- return async function (sql, rows) {
- var rowsJson = parseOrThrow(rows);
- await chunkBatchQuery(sql, rowsJson, batchSetFn, undefined);
- };
- }
-
- var PostgresInterop = {
- makeBatchSetFnString: makeBatchSetFnString,
- chunkBatchQuery: chunkBatchQuery,
- makeBatchSetFn: makeBatchSetFn
- };
-
  exports.mkField = mkField;
  exports.mkDerivedFromField = mkDerivedFromField;
  exports.getUserDefinedFieldName = getUserDefinedFieldName;
@@ -361,5 +323,4 @@ exports.getUnfilteredCompositeIndicesUnsafe = getUnfilteredCompositeIndicesUnsaf
  exports.toSqlParams = toSqlParams;
  exports.getSingleIndices = getSingleIndices;
  exports.getCompositeIndices = getCompositeIndices;
- exports.PostgresInterop = PostgresInterop;
  /* BigInt Not a pure module */
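With schemaName removed from the table record, mkTable callers pass only the table name, optional composite indices and the fields; the Postgres schema is now supplied where the SQL is actually generated (for example the new ~pgSchema argument on EntityHistory.fromTable and the PgStorage helpers). A before/after sketch against the compiled Table.res.js, with an empty fields array standing in for a real list built with Table.mkField:

    const Table = require("envio/src/db/Table.res.js");

    // Placeholder: a real table would build this with Table.mkField and friends.
    const fields = [];

    // 2.22.3 shape: Table.mkTable("users", "public", undefined, fields) took the schema name in the table itself.
    // 2.24.0 shape: no schema argument; `undefined` keeps the default (empty) compositeIndices.
    const usersTable = Table.mkTable("users", undefined, fields);
    console.log(usersTable); // { tableName: "users", fields: [], compositeIndices: [] }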