envio 2.22.2 → 2.23.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +5 -5
- package/src/Logging.res +8 -0
- package/src/Logging.res.js +29 -0
- package/src/Persistence.res +31 -5
- package/src/Persistence.res.js +18 -2
- package/src/PgStorage.res +296 -17
- package/src/PgStorage.res.js +227 -19
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "envio",
-  "version": "v2.22.2",
+  "version": "v2.23.0",
   "description": "A latency and sync speed optimized, developer friendly blockchain data indexer.",
   "bin": "./bin.js",
   "main": "./index.js",
@@ -25,10 +25,10 @@
   },
   "homepage": "https://envio.dev",
   "optionalDependencies": {
-    "envio-linux-x64": "v2.22.2",
-    "envio-linux-arm64": "v2.22.2",
-    "envio-darwin-x64": "v2.22.2",
-    "envio-darwin-arm64": "v2.22.2"
+    "envio-linux-x64": "v2.23.0",
+    "envio-linux-arm64": "v2.23.0",
+    "envio-darwin-x64": "v2.23.0",
+    "envio-darwin-arm64": "v2.23.0"
   },
   "dependencies": {
     "@envio-dev/hypersync-client": "0.6.5",
package/src/Logging.res
CHANGED
@@ -168,6 +168,14 @@ let logForItem = (eventItem, level: Pino.logLevel, message: string, ~params=?) =
   (eventItem->getEventLogger->Utils.magic->Js.Dict.unsafeGet((level :> string)))(params, message)
 }
 
+let noopLogger: Envio.logger = {
+  info: (_message: string, ~params as _=?) => (),
+  debug: (_message: string, ~params as _=?) => (),
+  warn: (_message: string, ~params as _=?) => (),
+  error: (_message: string, ~params as _=?) => (),
+  errorWithExn: (_message: string, _exn) => (),
+}
+
 let getUserLogger = (eventItem): Envio.logger => {
   info: (message: string, ~params=?) => eventItem->logForItem(#uinfo, message, ~params?),
   debug: (message: string, ~params=?) => eventItem->logForItem(#udebug, message, ~params?),
package/src/Logging.res.js
CHANGED
@@ -197,6 +197,34 @@ function logForItem(eventItem, level, message, params) {
   return getEventLogger(eventItem)[level](params, message);
 }
 
+function noopLogger_debug(_message, param) {
+
+}
+
+function noopLogger_info(_message, param) {
+
+}
+
+function noopLogger_warn(_message, param) {
+
+}
+
+function noopLogger_error(_message, param) {
+
+}
+
+function noopLogger_errorWithExn(_message, _exn) {
+
+}
+
+var noopLogger = {
+  debug: noopLogger_debug,
+  info: noopLogger_info,
+  warn: noopLogger_warn,
+  error: noopLogger_error,
+  errorWithExn: noopLogger_errorWithExn
+};
+
 function getUserLogger(eventItem) {
   return {
     debug: (function (message, params) {
@@ -243,5 +271,6 @@ exports.createChild = createChild;
 exports.createChildFrom = createChildFrom;
 exports.getEventLogger = getEventLogger;
 exports.logForItem = logForItem;
+exports.noopLogger = noopLogger;
 exports.getUserLogger = getUserLogger;
 /* logLevels Not a pure module */
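Both files add the same thing: noopLogger, an Envio.logger whose five methods discard their arguments, useful wherever a logger is required but output is unwanted. A minimal sketch of consuming the compiled export (the require path, helper name, and someEventItem value are illustrative assumptions, not documented envio API):

    var Logging = require("envio/src/Logging.res.js");

    // Hypothetical helper: fall back to the no-op logger when user logging is off.
    function loggerFor(eventItem, loggingEnabled) {
      return loggingEnabled ? Logging.getUserLogger(eventItem) : Logging.noopLogger;
    }

    // someEventItem stands in for a real event item. Every method exists on
    // both branches, so call sites need no null checks:
    var logger = loggerFor(someEventItem, false);
    logger.info("silently dropped", { chainId: 1 });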
package/src/Persistence.res
CHANGED
@@ -12,14 +12,28 @@ type storage = {
   // Should initialize the storage so we can start interacting with it
   // Eg create connection, schema, tables, etc.
   initialize: (
-    ~entities: array<Internal.entityConfig>,
-    ~staticTables: array<Table.table>,
-    ~enums: array<Internal.enumConfig<Internal.enum>>,
+    ~entities: array<Internal.entityConfig>=?,
+    ~generalTables: array<Table.table>=?,
+    ~enums: array<Internal.enumConfig<Internal.enum>>=?,
     // If true, the storage should clear existing data
-    ~cleanRun: bool,
+    ~cleanRun: bool=?,
+  ) => promise<unit>,
+  @raises("StorageError")
+  loadByIdsOrThrow: 'item. (
+    ~ids: array<string>,
+    ~table: Table.table,
+    ~rowsSchema: S.t<array<'item>>,
+  ) => promise<array<'item>>,
+  @raises("StorageError")
+  setOrThrow: 'item. (
+    ~items: array<'item>,
+    ~table: Table.table,
+    ~itemSchema: S.t<'item>,
   ) => promise<unit>,
 }
 
+exception StorageError({message: string, reason: exn})
+
 type storageStatus =
   | Unknown
   | Initializing(promise<unit>)
@@ -33,6 +47,7 @@ type t = {
   mutable storageStatus: storageStatus,
   storage: storage,
   onStorageInitialize: option<unit => promise<unit>>,
+  cacheStorage: storage,
 }
 
 let entityHistoryActionEnumConfig: Internal.enumConfig<EntityHistory.RowAction.t> = {
@@ -49,6 +64,7 @@ let make = (
   ~allEnums,
   ~staticTables,
   ~storage,
+  ~cacheStorage,
   ~onStorageInitialize=?,
 ) => {
   let allEntities = userEntities->Js.Array2.concat([dcRegistryEntityConfig])
@@ -62,6 +78,7 @@ let make = (
     storageStatus: Unknown,
     storage,
     onStorageInitialize,
+    cacheStorage,
   }
 }
 
@@ -93,7 +110,7 @@ let init = async (
   } else {
     let _ = await persistence.storage.initialize(
       ~entities=persistence.allEntities,
-      ~staticTables=persistence.staticTables,
+      ~generalTables=persistence.staticTables,
       ~enums=persistence.allEnums,
       ~cleanRun=reset || !skipIsInitializedCheck,
     )
@@ -109,3 +126,12 @@ let init = async (
   | exn => exn->ErrorHandling.mkLogAndRaise(~msg=`EE800: Failed to initialize the indexer storage.`)
   }
 }
+
+let getInitializedStorageOrThrow = persistence => {
+  switch persistence.storageStatus {
+  | Unknown
+  | Initializing(_) =>
+    Js.Exn.raiseError(`Failed to access the indexer storage. The Persistence layer is not initialized.`)
+  | Ready(_) => persistence.storage
+  }
+}
package/src/Persistence.res.js
CHANGED
@@ -1,10 +1,14 @@
 // Generated by ReScript, PLEASE EDIT WITH CARE
 'use strict';
 
+var Js_exn = require("rescript/lib/js/js_exn.js");
 var EntityHistory = require("./db/EntityHistory.res.js");
 var ErrorHandling = require("./ErrorHandling.res.js");
+var Caml_exceptions = require("rescript/lib/js/caml_exceptions.js");
 var Caml_js_exceptions = require("rescript/lib/js/caml_js_exceptions.js");
 
+var StorageError = /* @__PURE__ */Caml_exceptions.create("Persistence.StorageError");
+
 var entityHistoryActionEnumConfig_name = EntityHistory.RowAction.name;
 
 var entityHistoryActionEnumConfig_variants = EntityHistory.RowAction.variants;
@@ -18,7 +22,7 @@ var entityHistoryActionEnumConfig = {
   default: "SET"
 };
 
-function make(userEntities, dcRegistryEntityConfig, allEnums, staticTables, storage, onStorageInitialize) {
+function make(userEntities, dcRegistryEntityConfig, allEnums, staticTables, storage, cacheStorage, onStorageInitialize) {
   var allEntities = userEntities.concat([dcRegistryEntityConfig]);
   var allEnums$1 = allEnums.concat([entityHistoryActionEnumConfig]);
   return {
@@ -28,7 +32,8 @@ function make(userEntities, dcRegistryEntityConfig, allEnums, staticTables, stor
     allEnums: allEnums$1,
     storageStatus: "Unknown",
     storage: storage,
-    onStorageInitialize: onStorageInitialize
+    onStorageInitialize: onStorageInitialize,
+    cacheStorage: cacheStorage
   };
 }
 
@@ -84,7 +89,18 @@ async function init(persistence, skipIsInitializedCheckOpt, resetOpt) {
   }
 }
 
+function getInitializedStorageOrThrow(persistence) {
+  var match = persistence.storageStatus;
+  if (typeof match !== "object" || match.TAG === "Initializing") {
+    return Js_exn.raiseError("Failed to access the indexer storage. The Persistence layer is not initialized.");
+  } else {
+    return persistence.storage;
+  }
+}
+
+exports.StorageError = StorageError;
 exports.entityHistoryActionEnumConfig = entityHistoryActionEnumConfig;
 exports.make = make;
 exports.init = init;
+exports.getInitializedStorageOrThrow = getInitializedStorageOrThrow;
 /* EntityHistory Not a pure module */
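The persistence layer now exposes a StorageError exception that wraps the underlying failure in its reason field, plus getInitializedStorageOrThrow, which guards against touching storage before init completes. A hedged sketch of consuming both from plain JavaScript (the require path and variable names are assumptions):

    var Persistence = require("envio/src/Persistence.res.js");

    async function readRows(persistence, table, rowsSchema, ids) {
      // Throws a plain JS error if init() has not finished yet.
      var storage = Persistence.getInitializedStorageOrThrow(persistence);
      try {
        return await storage.loadByIdsOrThrow(ids, table, rowsSchema);
      } catch (e) {
        // Compiled ReScript exceptions are tagged with RE_EXN_ID;
        // StorageError also carries the wrapped cause in `reason`.
        if (e.RE_EXN_ID === Persistence.StorageError) {
          console.error(e.message, e.reason);
        }
        throw e;
      }
    }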
package/src/PgStorage.res
CHANGED
@@ -1,16 +1,16 @@
-let makeCreateIndexSqlUnsafe = (~tableName, ~indexFields, ~pgSchema) => {
+let makeCreateIndexSql = (~tableName, ~indexFields, ~pgSchema) => {
   let indexName = tableName ++ "_" ++ indexFields->Js.Array2.joinWith("_")
   let index = indexFields->Belt.Array.map(idx => `"${idx}"`)->Js.Array2.joinWith(", ")
   `CREATE INDEX IF NOT EXISTS "${indexName}" ON "${pgSchema}"."${tableName}"(${index});`
 }
 
-let makeCreateTableIndicesSqlUnsafe = (table: Table.table, ~pgSchema) => {
+let makeCreateTableIndicesSql = (table: Table.table, ~pgSchema) => {
   open Belt
   let tableName = table.tableName
   let createIndex = indexField =>
-    makeCreateIndexSqlUnsafe(~tableName, ~indexFields=[indexField], ~pgSchema)
+    makeCreateIndexSql(~tableName, ~indexFields=[indexField], ~pgSchema)
   let createCompositeIndex = indexFields => {
-    makeCreateIndexSqlUnsafe(~tableName, ~indexFields, ~pgSchema)
+    makeCreateIndexSql(~tableName, ~indexFields, ~pgSchema)
   }
 
   let singleIndices = table->Table.getSingleIndices
@@ -20,7 +20,7 @@ let makeCreateTableIndicesSqlUnsafe = (table: Table.table, ~pgSchema) => {
   compositeIndices->Array.map(createCompositeIndex)->Js.Array2.joinWith("\n")
 }
 
-let makeCreateTableSqlUnsafe = (table: Table.table, ~pgSchema) => {
+let makeCreateTableSql = (table: Table.table, ~pgSchema) => {
   open Belt
   let fieldsMapped =
     table
@@ -55,12 +55,12 @@ let makeCreateTableSqlUnsafe = (table: Table.table, ~pgSchema) => {
 let makeInitializeTransaction = (
   ~pgSchema,
   ~pgUser,
-  ~staticTables,
-  ~entities,
-  ~enums,
-  ~cleanRun,
+  ~generalTables=[],
+  ~entities=[],
+  ~enums=[],
+  ~cleanRun=false,
 ) => {
-  let allTables = staticTables->Array.copy
+  let allTables = generalTables->Array.copy
   let allEntityTables = []
   entities->Js.Array2.forEach((entity: Internal.entityConfig) => {
     allEntityTables->Js.Array2.push(entity.table)->ignore
@@ -76,7 +76,7 @@ let makeInitializeTransaction = (
 CREATE SCHEMA "${pgSchema}";`
       : `CREATE SCHEMA IF NOT EXISTS "${pgSchema}";`
   ) ++
-  `GRANT ALL ON SCHEMA "${pgSchema}" TO ${pgUser};
+  `GRANT ALL ON SCHEMA "${pgSchema}" TO "${pgUser}";
 GRANT ALL ON SCHEMA "${pgSchema}" TO public;`,
   )
 
@@ -106,12 +106,12 @@ END IF;`
 
   // Batch all table creation first (optimal for PostgreSQL)
   allTables->Js.Array2.forEach((table: Table.table) => {
-    query := query.contents ++ "\n" ++ makeCreateTableSqlUnsafe(table, ~pgSchema)
+    query := query.contents ++ "\n" ++ makeCreateTableSql(table, ~pgSchema)
   })
 
   // Then batch all indices (better performance when tables exist)
   allTables->Js.Array2.forEach((table: Table.table) => {
-    let indices = makeCreateTableIndicesSqlUnsafe(table, ~pgSchema)
+    let indices = makeCreateTableIndicesSql(table, ~pgSchema)
     if indices !== "" {
       query := query.contents ++ "\n" ++ indices
     }
@@ -131,7 +131,7 @@ END IF;`
       query :=
         query.contents ++
         "\n" ++
-        makeCreateIndexSqlUnsafe(
+        makeCreateIndexSql(
           ~tableName=derivedFromField.derivedFromEntity,
           ~indexFields=[indexField],
           ~pgSchema,
@@ -143,11 +143,238 @@ END IF;`
     // Return optimized queries - main DDL in DO block, functions separate
     // Note: DO $$ BEGIN wrapper is only needed for PL/pgSQL conditionals (IF NOT EXISTS)
     // Reset case uses direct DDL (faster), non-cleanRun case uses conditionals (safer)
-    cleanRun
+    cleanRun || enums->Utils.Array.isEmpty
+      ? query.contents
+      : `DO $$ BEGIN ${query.contents} END $$;`,
     // Functions query (separate as they can't be in DO block)
   ]->Js.Array2.concat(functionsQuery.contents !== "" ? [functionsQuery.contents] : [])
 }
 
+let makeLoadByIdSql = (~pgSchema, ~tableName) => {
+  `SELECT * FROM "${pgSchema}"."${tableName}" WHERE id = $1 LIMIT 1;`
+}
+
+let makeLoadByIdsSql = (~pgSchema, ~tableName) => {
+  `SELECT * FROM "${pgSchema}"."${tableName}" WHERE id = ANY($1::text[]);`
+}
+
+let makeInsertUnnestSetSql = (~pgSchema, ~table: Table.table, ~itemSchema, ~isRawEvents) => {
+  let {quotedFieldNames, quotedNonPrimaryFieldNames, arrayFieldTypes} =
+    table->Table.toSqlParams(~schema=itemSchema)
+
+  let primaryKeyFieldNames = Table.getPrimaryKeyFieldNames(table)
+
+  `INSERT INTO "${pgSchema}"."${table.tableName}" (${quotedFieldNames->Js.Array2.joinWith(", ")})
+SELECT * FROM unnest(${arrayFieldTypes
+    ->Js.Array2.mapi((arrayFieldType, idx) => {
+      `$${(idx + 1)->Js.Int.toString}::${arrayFieldType}`
+    })
+    ->Js.Array2.joinWith(",")})` ++
+  switch (isRawEvents, primaryKeyFieldNames) {
+  | (true, _)
+  | (_, []) => ``
+  | (false, primaryKeyFieldNames) =>
+    `ON CONFLICT(${primaryKeyFieldNames
+      ->Js.Array2.map(fieldName => `"${fieldName}"`)
+      ->Js.Array2.joinWith(",")}) DO ` ++ (
+      quotedNonPrimaryFieldNames->Utils.Array.isEmpty
+        ? `NOTHING`
+        : `UPDATE SET ${quotedNonPrimaryFieldNames
+            ->Js.Array2.map(fieldName => {
+              `${fieldName} = EXCLUDED.${fieldName}`
+            })
+            ->Js.Array2.joinWith(",")}`
+    )
+  } ++ ";"
+}
+
+let makeInsertValuesSetSql = (~pgSchema, ~table: Table.table, ~itemSchema, ~itemsCount) => {
+  let {quotedFieldNames, quotedNonPrimaryFieldNames} = table->Table.toSqlParams(~schema=itemSchema)
+
+  let primaryKeyFieldNames = Table.getPrimaryKeyFieldNames(table)
+  let fieldsCount = quotedFieldNames->Array.length
+
+  // Create placeholder variables for the VALUES clause - using $1, $2, etc.
+  let placeholders = ref("")
+  for idx in 1 to itemsCount {
+    if idx > 1 {
+      placeholders := placeholders.contents ++ ","
+    }
+    placeholders := placeholders.contents ++ "("
+    for fieldIdx in 0 to fieldsCount - 1 {
+      if fieldIdx > 0 {
+        placeholders := placeholders.contents ++ ","
+      }
+      placeholders := placeholders.contents ++ `$${(fieldIdx * itemsCount + idx)->Js.Int.toString}`
+    }
+    placeholders := placeholders.contents ++ ")"
+  }
+
+  `INSERT INTO "${pgSchema}"."${table.tableName}" (${quotedFieldNames->Js.Array2.joinWith(", ")})
+VALUES${placeholders.contents}` ++
+  switch primaryKeyFieldNames {
+  | [] => ``
+  | primaryKeyFieldNames =>
+    `ON CONFLICT(${primaryKeyFieldNames
+      ->Js.Array2.map(fieldName => `"${fieldName}"`)
+      ->Js.Array2.joinWith(",")}) DO ` ++ (
+      quotedNonPrimaryFieldNames->Utils.Array.isEmpty
+        ? `NOTHING`
+        : `UPDATE SET ${quotedNonPrimaryFieldNames
+            ->Js.Array2.map(fieldName => {
+              `${fieldName} = EXCLUDED.${fieldName}`
+            })
+            ->Js.Array2.joinWith(",")}`
+    )
+  } ++ ";"
+}
+
+// Should move this to a better place
+// We need it for the isRawEvents check in makeTableBatchSet
+// to always apply the unnest optimization.
+// This is needed, because even though it has JSON fields,
+// they are always guaranteed to be an object.
+// FIXME what about Fuel params?
+let rawEventsTableName = "raw_events"
+
+// Constants for chunking
+let maxItemsPerQuery = 500
+
+let makeTableBatchSetQuery = (~pgSchema, ~table: Table.table, ~itemSchema: S.t<'item>) => {
+  let {dbSchema, hasArrayField} = table->Table.toSqlParams(~schema=itemSchema)
+  let isRawEvents = table.tableName === rawEventsTableName
+
+  // Should experiment how much it'll affect performance
+  // Although, it should be fine not to perform the validation check,
+  // since the values are validated by type system.
+  // As an alternative, we can only run Sury validation only when
+  // db write fails to show a better user error.
+  let typeValidation = false
+
+  if isRawEvents || !hasArrayField {
+    {
+      "sql": makeInsertUnnestSetSql(~pgSchema, ~table, ~itemSchema, ~isRawEvents),
+      "convertOrThrow": S.compile(
+        S.unnest(dbSchema),
+        ~input=Value,
+        ~output=Unknown,
+        ~mode=Sync,
+        ~typeValidation,
+      ),
+      "isInsertValues": false,
+    }
+  } else {
+    {
+      "sql": makeInsertValuesSetSql(~pgSchema, ~table, ~itemSchema, ~itemsCount=maxItemsPerQuery),
+      "convertOrThrow": S.compile(
+        S.unnest(itemSchema)->S.preprocess(_ => {
+          serializer: Utils.Array.flatten->Utils.magic,
+        }),
+        ~input=Value,
+        ~output=Unknown,
+        ~mode=Sync,
+        ~typeValidation,
+      ),
+      "isInsertValues": true,
+    }
+  }
+}
+
+let chunkArray = (arr: array<'a>, ~chunkSize) => {
+  let chunks = []
+  let i = ref(0)
+  while i.contents < arr->Array.length {
+    let chunk = arr->Js.Array2.slice(~start=i.contents, ~end_=i.contents + chunkSize)
+    chunks->Js.Array2.push(chunk)->ignore
+    i := i.contents + chunkSize
+  }
+  chunks
+}
+
+// WeakMap for caching table batch set queries
+let setQueryCache = Utils.WeakMap.make()
+let setOrThrow = async (sql, ~items, ~table: Table.table, ~itemSchema, ~pgSchema) => {
+  if items->Array.length === 0 {
+    ()
+  } else {
+    // Get or create cached query for this table
+    let query = switch setQueryCache->Utils.WeakMap.get(table) {
+    | Some(cached) => cached
+    | None => {
+        let newQuery = makeTableBatchSetQuery(
+          ~pgSchema,
+          ~table,
+          ~itemSchema=itemSchema->S.toUnknown,
+        )
+        setQueryCache->Utils.WeakMap.set(table, newQuery)->ignore
+        newQuery
+      }
+    }
+
+    let sqlQuery = query["sql"]
+
+    try {
+      let payload =
+        query["convertOrThrow"](items->(Utils.magic: array<'item> => array<unknown>))->(
+          Utils.magic: unknown => array<unknown>
+        )
+
+      if query["isInsertValues"] {
+        let fieldsCount = switch itemSchema->S.classify {
+        | S.Object({items}) => items->Array.length
+        | _ => Js.Exn.raiseError("Expected an object schema for table")
+        }
+
+        // Chunk the items for VALUES-based queries
+        // We need to multiply by fields count,
+        // because we flattened our entity values with S.unnest
+        // to optimize the query execution.
+        let maxChunkSize = maxItemsPerQuery * fieldsCount
+        let chunks = chunkArray(payload, ~chunkSize=maxChunkSize)
+        let responses = []
+        chunks->Js.Array2.forEach(chunk => {
+          let chunkSize = chunk->Array.length
+          let isFullChunk = chunkSize === maxChunkSize
+
+          let response = sql->Postgres.preparedUnsafe(
+            // Either use the sql query for full chunks from cache
+            // or create a new one for partial chunks on the fly.
+            isFullChunk
+              ? sqlQuery
+              : makeInsertValuesSetSql(
+                  ~pgSchema,
+                  ~table,
+                  ~itemSchema,
+                  ~itemsCount=chunkSize / fieldsCount,
+                ),
+            chunk->Utils.magic,
+          )
+          responses->Js.Array2.push(response)->ignore
+        })
+        let _ = await Promise.all(responses)
+      } else {
+        // Use UNNEST approach for single query
+        await sql->Postgres.preparedUnsafe(sqlQuery, payload->Obj.magic)
+      }
+    } catch {
+    | S.Raised(_) as exn =>
+      raise(
+        Persistence.StorageError({
+          message: `Failed to convert items for table "${table.tableName}"`,
+          reason: exn,
+        }),
+      )
+    | exn =>
+      raise(
+        Persistence.StorageError({
+          message: `Failed to insert items into table "${table.tableName}"`,
+          reason: exn,
+        }),
+      )
+    }
+  }
+}
+
 let make = (~sql: Postgres.sql, ~pgSchema, ~pgUser): Persistence.storage => {
   let isInitialized = async () => {
     let schemas =
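Two details of the added batch-insert path deserve a worked example. makeInsertValuesSetSql numbers its placeholders column-major, $(fieldIdx * itemsCount + idx), because convertOrThrow flattens the batch column by column via S.unnest; setOrThrow therefore chunks the flattened payload by maxItemsPerQuery * fieldsCount elements. A standalone re-derivation of the layout in illustrative JavaScript, mirroring the ReScript above:

    // Rebuild the VALUES placeholder grid for fieldsCount columns and itemsCount rows.
    function placeholders(fieldsCount, itemsCount) {
      var rows = [];
      for (var idx = 1; idx <= itemsCount; idx++) {
        var cols = [];
        for (var fieldIdx = 0; fieldIdx < fieldsCount; fieldIdx++) {
          // Column-major: column 0 binds $1..$itemsCount, column 1 the next block, etc.
          cols.push("$" + (fieldIdx * itemsCount + idx));
        }
        rows.push("(" + cols.join(",") + ")");
      }
      return rows.join(",");
    }

    // Three fields, two items: parameters group per column, matching a payload
    // of column arrays flattened end to end by S.unnest.
    console.log(placeholders(3, 2)); // ($1,$3,$5),($2,$4,$6)

With maxItemsPerQuery = 500, a table with 8 fields is sent in chunks of 4000 flattened parameters, and a final partial chunk gets its SQL rebuilt on the fly with itemsCount = chunkSize / fieldsCount.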
@@ -157,11 +384,11 @@ let make = (~sql: Postgres.sql, ~pgSchema, ~pgUser): Persistence.storage => {
     schemas->Utils.Array.notEmpty
   }
 
-  let initialize = async (~entities, ~staticTables, ~enums, ~cleanRun) => {
+  let initialize = async (~entities=[], ~generalTables=[], ~enums=[], ~cleanRun=false) => {
     let queries = makeInitializeTransaction(
       ~pgSchema,
       ~pgUser,
-      ~staticTables,
+      ~generalTables,
       ~entities,
       ~enums,
       ~cleanRun,
@@ -172,8 +399,60 @@ let make = (~sql: Postgres.sql, ~pgSchema, ~pgUser): Persistence.storage => {
     })
   }
 
+  let loadByIdsOrThrow = async (~ids, ~table: Table.table, ~rowsSchema) => {
+    switch await (
+      switch ids {
+      | [_] =>
+        sql->Postgres.preparedUnsafe(
+          makeLoadByIdSql(~pgSchema, ~tableName=table.tableName),
+          ids->Obj.magic,
+        )
+      | _ =>
+        sql->Postgres.preparedUnsafe(
+          makeLoadByIdsSql(~pgSchema, ~tableName=table.tableName),
+          [ids]->Obj.magic,
+        )
+      }
+    ) {
+    | exception exn =>
+      raise(
+        Persistence.StorageError({
+          message: `Failed loading "${table.tableName}" from storage by ids`,
+          reason: exn,
+        }),
+      )
+    | rows =>
+      try rows->S.parseOrThrow(rowsSchema) catch {
+      | exn =>
+        raise(
+          Persistence.StorageError({
+            message: `Failed to parse "${table.tableName}" loaded from storage by ids`,
+            reason: exn,
+          }),
+        )
+      }
+    }
+  }
+
+  let setOrThrow = (
+    type item,
+    ~items: array<item>,
+    ~table: Table.table,
+    ~itemSchema: S.t<item>,
+  ) => {
+    setOrThrow(
+      sql,
+      ~items=items->(Utils.magic: array<item> => array<unknown>),
+      ~table,
+      ~itemSchema=itemSchema->S.toUnknown,
+      ~pgSchema,
+    )
+  }
+
   {
     isInitialized,
     initialize,
+    loadByIdsOrThrow,
+    setOrThrow,
   }
 }
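loadByIdsOrThrow keeps two prepared statements per table: a plain equality lookup when exactly one id is requested (the ids array itself is the parameter list), and an ANY($1::text[]) lookup for batches (the whole array binds to $1, hence the [ids] wrapper). The SQL builders are exported, so the generated statements can be inspected directly; assuming pgSchema "public" and a table named "user" for illustration:

    var PgStorage = require("envio/src/PgStorage.res.js");

    console.log(PgStorage.makeLoadByIdSql("public", "user"));
    // SELECT * FROM "public"."user" WHERE id = $1 LIMIT 1;

    console.log(PgStorage.makeLoadByIdsSql("public", "user"));
    // SELECT * FROM "public"."user" WHERE id = ANY($1::text[]);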
package/src/PgStorage.res.js
CHANGED
@@ -4,10 +4,16 @@
 var $$Array = require("rescript/lib/js/array.js");
 var Table = require("./db/Table.res.js");
 var Utils = require("./Utils.res.js");
+var Js_exn = require("rescript/lib/js/js_exn.js");
 var Schema = require("./db/Schema.res.js");
 var Belt_Array = require("rescript/lib/js/belt_Array.js");
+var Caml_int32 = require("rescript/lib/js/caml_int32.js");
+var Caml_option = require("rescript/lib/js/caml_option.js");
+var Persistence = require("./Persistence.res.js");
+var S$RescriptSchema = require("rescript-schema/src/S.res.js");
+var Caml_js_exceptions = require("rescript/lib/js/caml_js_exceptions.js");
 
-function makeCreateIndexSqlUnsafe(tableName, indexFields, pgSchema) {
+function makeCreateIndexSql(tableName, indexFields, pgSchema) {
   var indexName = tableName + "_" + indexFields.join("_");
   var index = Belt_Array.map(indexFields, (function (idx) {
     return "\"" + idx + "\"";
@@ -15,20 +21,20 @@ function makeCreateIndexSqlUnsafe(tableName, indexFields, pgSchema) {
   return "CREATE INDEX IF NOT EXISTS \"" + indexName + "\" ON \"" + pgSchema + "\".\"" + tableName + "\"(" + index + ");";
 }
 
-function makeCreateTableIndicesSqlUnsafe(table, pgSchema) {
+function makeCreateTableIndicesSql(table, pgSchema) {
   var tableName = table.tableName;
   var createIndex = function (indexField) {
-    return makeCreateIndexSqlUnsafe(tableName, [indexField], pgSchema);
+    return makeCreateIndexSql(tableName, [indexField], pgSchema);
   };
   var createCompositeIndex = function (indexFields) {
-    return makeCreateIndexSqlUnsafe(tableName, indexFields, pgSchema);
+    return makeCreateIndexSql(tableName, indexFields, pgSchema);
   };
   var singleIndices = Table.getSingleIndices(table);
   var compositeIndices = Table.getCompositeIndices(table);
   return Belt_Array.map(singleIndices, createIndex).join("\n") + Belt_Array.map(compositeIndices, createCompositeIndex).join("\n");
 }
 
-function makeCreateTableSqlUnsafe(table, pgSchema) {
+function makeCreateTableSql(table, pgSchema) {
   var fieldsMapped = Belt_Array.map(Table.getFields(table), (function (field) {
     var defaultValue = field.defaultValue;
     var fieldType = field.fieldType;
@@ -52,8 +58,12 @@ function makeCreateTableSqlUnsafe(table, pgSchema) {
   ) + ");";
 }
 
-function makeInitializeTransaction(pgSchema, pgUser, staticTables, entities, enums, cleanRun) {
-  var allTables = $$Array.copy(staticTables);
+function makeInitializeTransaction(pgSchema, pgUser, generalTablesOpt, entitiesOpt, enumsOpt, cleanRunOpt) {
+  var generalTables = generalTablesOpt !== undefined ? generalTablesOpt : [];
+  var entities = entitiesOpt !== undefined ? entitiesOpt : [];
+  var enums = enumsOpt !== undefined ? enumsOpt : [];
+  var cleanRun = cleanRunOpt !== undefined ? cleanRunOpt : false;
+  var allTables = $$Array.copy(generalTables);
   var allEntityTables = [];
   entities.forEach(function (entity) {
     allEntityTables.push(entity.table);
@@ -64,7 +74,7 @@ function makeInitializeTransaction(pgSchema, pgUser, staticTables, entities, enums, cleanRun) {
   var query = {
     contents: (
       cleanRun ? "DROP SCHEMA IF EXISTS \"" + pgSchema + "\" CASCADE;\nCREATE SCHEMA \"" + pgSchema + "\";" : "CREATE SCHEMA IF NOT EXISTS \"" + pgSchema + "\";"
-    ) + ("GRANT ALL ON SCHEMA \"" + pgSchema + "\" TO " + pgUser + ";\nGRANT ALL ON SCHEMA \"" + pgSchema + "\" TO public;")
+    ) + ("GRANT ALL ON SCHEMA \"" + pgSchema + "\" TO \"" + pgUser + "\";\nGRANT ALL ON SCHEMA \"" + pgSchema + "\" TO public;")
   };
   enums.forEach(function (enumConfig) {
     var enumCreateQuery = "CREATE TYPE \"" + pgSchema + "\"." + enumConfig.name + " AS ENUM(" + enumConfig.variants.map(function (v) {
@@ -75,10 +85,10 @@ function makeInitializeTransaction(pgSchema, pgUser, staticTables, entities, enums, cleanRun) {
     );
   });
   allTables.forEach(function (table) {
-    query.contents = query.contents + "\n" + makeCreateTableSqlUnsafe(table, pgSchema);
+    query.contents = query.contents + "\n" + makeCreateTableSql(table, pgSchema);
   });
   allTables.forEach(function (table) {
-    var indices = makeCreateTableIndicesSqlUnsafe(table, pgSchema);
+    var indices = makeCreateTableIndicesSql(table, pgSchema);
     if (indices !== "") {
       query.contents = query.contents + "\n" + indices;
       return ;
@@ -92,10 +102,158 @@ function makeInitializeTransaction(pgSchema, pgUser, staticTables, entities, enums, cleanRun) {
     functionsQuery.contents = functionsQuery.contents + "\n" + entity.entityHistory.createInsertFnQuery;
     Table.getDerivedFromFields(entity.table).forEach(function (derivedFromField) {
       var indexField = Utils.unwrapResultExn(Schema.getDerivedFromFieldName(derivedSchema, derivedFromField));
-      query.contents = query.contents + "\n" + makeCreateIndexSqlUnsafe(derivedFromField.derivedFromEntity, [indexField], pgSchema);
+      query.contents = query.contents + "\n" + makeCreateIndexSql(derivedFromField.derivedFromEntity, [indexField], pgSchema);
     });
   });
-  return [cleanRun ? query.contents : "DO $$ BEGIN " + query.contents + " END $$;"].concat(functionsQuery.contents !== "" ? [functionsQuery.contents] : []);
+  return [cleanRun || Utils.$$Array.isEmpty(enums) ? query.contents : "DO $$ BEGIN " + query.contents + " END $$;"].concat(functionsQuery.contents !== "" ? [functionsQuery.contents] : []);
+}
+
+function makeLoadByIdSql(pgSchema, tableName) {
+  return "SELECT * FROM \"" + pgSchema + "\".\"" + tableName + "\" WHERE id = $1 LIMIT 1;";
+}
+
+function makeLoadByIdsSql(pgSchema, tableName) {
+  return "SELECT * FROM \"" + pgSchema + "\".\"" + tableName + "\" WHERE id = ANY($1::text[]);";
+}
+
+function makeInsertUnnestSetSql(pgSchema, table, itemSchema, isRawEvents) {
+  var match = Table.toSqlParams(table, itemSchema);
+  var quotedNonPrimaryFieldNames = match.quotedNonPrimaryFieldNames;
+  var primaryKeyFieldNames = Table.getPrimaryKeyFieldNames(table);
+  return "INSERT INTO \"" + pgSchema + "\".\"" + table.tableName + "\" (" + match.quotedFieldNames.join(", ") + ")\nSELECT * FROM unnest(" + match.arrayFieldTypes.map(function (arrayFieldType, idx) {
+    return "$" + (idx + 1 | 0).toString() + "::" + arrayFieldType;
+  }).join(",") + ")" + (
+    isRawEvents || primaryKeyFieldNames.length === 0 ? "" : "ON CONFLICT(" + primaryKeyFieldNames.map(function (fieldName) {
+      return "\"" + fieldName + "\"";
+    }).join(",") + ") DO " + (
+      Utils.$$Array.isEmpty(quotedNonPrimaryFieldNames) ? "NOTHING" : "UPDATE SET " + quotedNonPrimaryFieldNames.map(function (fieldName) {
+        return fieldName + " = EXCLUDED." + fieldName;
+      }).join(",")
+    )
+  ) + ";";
+}
+
+function makeInsertValuesSetSql(pgSchema, table, itemSchema, itemsCount) {
+  var match = Table.toSqlParams(table, itemSchema);
+  var quotedNonPrimaryFieldNames = match.quotedNonPrimaryFieldNames;
+  var quotedFieldNames = match.quotedFieldNames;
+  var primaryKeyFieldNames = Table.getPrimaryKeyFieldNames(table);
+  var fieldsCount = quotedFieldNames.length;
+  var placeholders = "";
+  for(var idx = 1; idx <= itemsCount; ++idx){
+    if (idx > 1) {
+      placeholders = placeholders + ",";
+    }
+    placeholders = placeholders + "(";
+    for(var fieldIdx = 0; fieldIdx < fieldsCount; ++fieldIdx){
+      if (fieldIdx > 0) {
+        placeholders = placeholders + ",";
+      }
+      placeholders = placeholders + ("$" + (Math.imul(fieldIdx, itemsCount) + idx | 0).toString());
+    }
+    placeholders = placeholders + ")";
+  }
+  return "INSERT INTO \"" + pgSchema + "\".\"" + table.tableName + "\" (" + quotedFieldNames.join(", ") + ")\nVALUES" + placeholders + (
+    primaryKeyFieldNames.length !== 0 ? "ON CONFLICT(" + primaryKeyFieldNames.map(function (fieldName) {
+      return "\"" + fieldName + "\"";
+    }).join(",") + ") DO " + (
+      Utils.$$Array.isEmpty(quotedNonPrimaryFieldNames) ? "NOTHING" : "UPDATE SET " + quotedNonPrimaryFieldNames.map(function (fieldName) {
+        return fieldName + " = EXCLUDED." + fieldName;
+      }).join(",")
+    ) : ""
+  ) + ";";
+}
+
+var rawEventsTableName = "raw_events";
+
+function makeTableBatchSetQuery(pgSchema, table, itemSchema) {
+  var match = Table.toSqlParams(table, itemSchema);
+  var isRawEvents = table.tableName === rawEventsTableName;
+  if (isRawEvents || !match.hasArrayField) {
+    return {
+      sql: makeInsertUnnestSetSql(pgSchema, table, itemSchema, isRawEvents),
+      convertOrThrow: S$RescriptSchema.compile(S$RescriptSchema.unnest(match.dbSchema), "Output", "Input", "Sync", false),
+      isInsertValues: false
+    };
+  } else {
+    return {
+      sql: makeInsertValuesSetSql(pgSchema, table, itemSchema, 500),
+      convertOrThrow: S$RescriptSchema.compile(S$RescriptSchema.preprocess(S$RescriptSchema.unnest(itemSchema), (function (param) {
+        return {
+          s: (function (prim) {
+            return prim.flat(1);
+          })
+        };
+      })), "Output", "Input", "Sync", false),
+      isInsertValues: true
+    };
+  }
+}
+
+function chunkArray(arr, chunkSize) {
+  var chunks = [];
+  var i = 0;
+  while(i < arr.length) {
+    var chunk = arr.slice(i, i + chunkSize | 0);
+    chunks.push(chunk);
+    i = i + chunkSize | 0;
+  };
+  return chunks;
+}
+
+var setQueryCache = new WeakMap();
+
+async function setOrThrow(sql, items, table, itemSchema, pgSchema) {
+  if (items.length === 0) {
+    return ;
+  }
+  var cached = setQueryCache.get(table);
+  var query;
+  if (cached !== undefined) {
+    query = Caml_option.valFromOption(cached);
+  } else {
+    var newQuery = makeTableBatchSetQuery(pgSchema, table, itemSchema);
+    setQueryCache.set(table, newQuery);
+    query = newQuery;
+  }
+  var sqlQuery = query.sql;
+  try {
+    var payload = query.convertOrThrow(items);
+    if (!query.isInsertValues) {
+      return await sql.unsafe(sqlQuery, payload, {prepare: true});
+    }
+    var match = itemSchema.t;
+    var fieldsCount;
+    fieldsCount = typeof match !== "object" || match.TAG !== "object" ? Js_exn.raiseError("Expected an object schema for table") : match.items.length;
+    var maxChunkSize = Math.imul(500, fieldsCount);
+    var chunks = chunkArray(payload, maxChunkSize);
+    var responses = [];
+    chunks.forEach(function (chunk) {
+      var chunkSize = chunk.length;
+      var isFullChunk = chunkSize === maxChunkSize;
+      var response = sql.unsafe(isFullChunk ? sqlQuery : makeInsertValuesSetSql(pgSchema, table, itemSchema, Caml_int32.div(chunkSize, fieldsCount)), chunk, {prepare: true});
+      responses.push(response);
+    });
+    await Promise.all(responses);
+    return ;
+  }
+  catch (raw_exn){
+    var exn = Caml_js_exceptions.internalToOCamlException(raw_exn);
+    if (exn.RE_EXN_ID === S$RescriptSchema.Raised) {
+      throw {
+        RE_EXN_ID: Persistence.StorageError,
+        message: "Failed to convert items for table \"" + table.tableName + "\"",
+        reason: exn,
+        Error: new Error()
+      };
+    }
+    throw {
+      RE_EXN_ID: Persistence.StorageError,
+      message: "Failed to insert items into table \"" + table.tableName + "\"",
+      reason: exn,
+      Error: new Error()
+    };
+  }
 }
 
 function make(sql, pgSchema, pgUser) {
@@ -103,23 +261,73 @@ function make(sql, pgSchema, pgUser) {
   var isInitialized = async function () {
     var schemas = await sql.unsafe("SELECT schema_name FROM information_schema.schemata WHERE schema_name = '" + pgSchema + "';");
     return Utils.$$Array.notEmpty(schemas);
   };
-  var initialize = async function (entities, staticTables, enums, cleanRun) {
-    var queries = makeInitializeTransaction(pgSchema, pgUser, staticTables, entities, enums, cleanRun);
+  var initialize = async function (entitiesOpt, generalTablesOpt, enumsOpt, cleanRunOpt) {
+    var entities = entitiesOpt !== undefined ? entitiesOpt : [];
+    var generalTables = generalTablesOpt !== undefined ? generalTablesOpt : [];
+    var enums = enumsOpt !== undefined ? enumsOpt : [];
+    var cleanRun = cleanRunOpt !== undefined ? cleanRunOpt : false;
+    var queries = makeInitializeTransaction(pgSchema, pgUser, generalTables, entities, enums, cleanRun);
     await sql.begin(function (sql) {
       return queries.map(function (query) {
         return sql.unsafe(query);
       });
     });
   };
+  var loadByIdsOrThrow = async function (ids, table, rowsSchema) {
+    var rows;
+    try {
+      rows = await (
+        ids.length !== 1 ? sql.unsafe(makeLoadByIdsSql(pgSchema, table.tableName), [ids], {prepare: true}) : sql.unsafe(makeLoadByIdSql(pgSchema, table.tableName), ids, {prepare: true})
+      );
+    }
+    catch (raw_exn){
+      var exn = Caml_js_exceptions.internalToOCamlException(raw_exn);
+      throw {
+        RE_EXN_ID: Persistence.StorageError,
+        message: "Failed loading \"" + table.tableName + "\" from storage by ids",
+        reason: exn,
+        Error: new Error()
+      };
+    }
+    try {
+      return S$RescriptSchema.parseOrThrow(rows, rowsSchema);
+    }
+    catch (raw_exn$1){
+      var exn$1 = Caml_js_exceptions.internalToOCamlException(raw_exn$1);
+      throw {
+        RE_EXN_ID: Persistence.StorageError,
+        message: "Failed to parse \"" + table.tableName + "\" loaded from storage by ids",
+        reason: exn$1,
+        Error: new Error()
+      };
+    }
+  };
+  var setOrThrow$1 = function (items, table, itemSchema) {
+    return setOrThrow(sql, items, table, itemSchema, pgSchema);
+  };
   return {
     isInitialized: isInitialized,
-    initialize: initialize
+    initialize: initialize,
+    loadByIdsOrThrow: loadByIdsOrThrow,
+    setOrThrow: setOrThrow$1
   };
 }
 
-
-
-exports.
+var maxItemsPerQuery = 500;
+
+exports.makeCreateIndexSql = makeCreateIndexSql;
+exports.makeCreateTableIndicesSql = makeCreateTableIndicesSql;
+exports.makeCreateTableSql = makeCreateTableSql;
 exports.makeInitializeTransaction = makeInitializeTransaction;
+exports.makeLoadByIdSql = makeLoadByIdSql;
+exports.makeLoadByIdsSql = makeLoadByIdsSql;
+exports.makeInsertUnnestSetSql = makeInsertUnnestSetSql;
+exports.makeInsertValuesSetSql = makeInsertValuesSetSql;
+exports.rawEventsTableName = rawEventsTableName;
+exports.maxItemsPerQuery = maxItemsPerQuery;
+exports.makeTableBatchSetQuery = makeTableBatchSetQuery;
+exports.chunkArray = chunkArray;
+exports.setQueryCache = setQueryCache;
+exports.setOrThrow = setOrThrow;
 exports.make = make;
-/*
+/* setQueryCache Not a pure module */
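Taken together, PgStorage.make now returns a storage record with four members: isInitialized, initialize (all arguments optional as of this release), loadByIdsOrThrow, and setOrThrow. A hedged end-to-end sketch; the connection string, schema, user, and the userTable/schema values are assumptions for illustration, not envio's documented wiring:

    var postgres = require("postgres");
    var PgStorage = require("envio/src/PgStorage.res.js");

    var sql = postgres("postgres://localhost/indexer");
    var storage = PgStorage.make(sql, "public", "postgres");

    async function demo(userTable, userItemSchema, userRowsSchema, rows) {
      // Optional labeled args compile to trailing positional parameters;
      // omitting them all picks the new defaults ([], [], [], false).
      await storage.initialize();
      await storage.setOrThrow(rows, userTable, userItemSchema);
      return storage.loadByIdsOrThrow(["user-1"], userTable, userRowsSchema);
    }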