envio 2.23.0 → 2.25.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/evm.schema.json +7 -0
- package/fuel.schema.json +7 -0
- package/package.json +5 -5
- package/src/FetchState.res +1 -4
- package/src/Persistence.res +0 -3
- package/src/Persistence.res.js +2 -3
- package/src/PgStorage.res +8 -25
- package/src/PgStorage.res.js +4 -11
- package/src/db/EntityHistory.res +5 -6
- package/src/db/EntityHistory.res.js +4 -5
- package/src/db/Table.res +1 -59
- package/src/db/Table.res.js +1 -40
package/evm.schema.json
CHANGED
@@ -33,6 +33,13 @@
         "null"
       ]
     },
+    "output": {
+      "description": "Path where the generated directory will be placed. By default it's 'generated' relative to the current working directory. If set, it'll be a path relative to the config file location.",
+      "type": [
+        "string",
+        "null"
+      ]
+    },
     "contracts": {
       "description": "Global contract definitions that must contain all definitions except addresses. You can share a single handler/abi/event definitions for contracts across multiple chains.",
       "type": [
package/fuel.schema.json
CHANGED
@@ -26,6 +26,13 @@
         "null"
       ]
     },
+    "output": {
+      "description": "Path where the generated directory will be placed. By default it's 'generated' relative to the current working directory. If set, it'll be a path relative to the config file location.",
+      "type": [
+        "string",
+        "null"
+      ]
+    },
     "contracts": {
       "description": "Global contract definitions that must contain all definitions except addresses. You can share a single handler/abi/event definitions for contracts across multiple chains.",
       "type": [
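Both schema files add the same nullable `output` option. As a quick illustration of what the `["string", "null"]` type union accepts, here is a minimal sketch that validates the fragment with Ajv (Ajv is not an envio dependency; it is used here only to exercise the property shape shown above):

// Hypothetical check of the new `output` field shape; Ajv is an assumption for illustration.
const Ajv = require("ajv");

const fragment = {
  type: "object",
  properties: {
    output: { type: ["string", "null"] },
  },
};

const validate = new Ajv().compile(fragment);
console.log(validate({ output: "./indexer-generated" })); // true: custom path
console.log(validate({ output: null }));                  // true: explicitly null
console.log(validate({}));                                // true: omitted, falls back to 'generated'
console.log(validate({ output: 42 }));                    // false: must be string or null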
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "envio",
-  "version": "v2.23.0",
+  "version": "v2.25.0",
   "description": "A latency and sync speed optimized, developer friendly blockchain data indexer.",
   "bin": "./bin.js",
   "main": "./index.js",
@@ -25,10 +25,10 @@
   },
   "homepage": "https://envio.dev",
   "optionalDependencies": {
-    "envio-linux-x64": "v2.23.0",
-    "envio-linux-arm64": "v2.23.0",
-    "envio-darwin-x64": "v2.23.0",
-    "envio-darwin-arm64": "v2.23.0"
+    "envio-linux-x64": "v2.25.0",
+    "envio-linux-arm64": "v2.25.0",
+    "envio-darwin-x64": "v2.25.0",
+    "envio-darwin-arm64": "v2.25.0"
   },
   "dependencies": {
     "@envio-dev/hypersync-client": "0.6.5",
package/src/FetchState.res
CHANGED
@@ -901,10 +901,7 @@ let queueItemIsInReorgThreshold = (
   if currentBlockHeight === 0 {
     false
   } else {
-    switch queueItem {
-    | Item(_) => queueItem->queueItemBlockNumber > highestBlockBelowThreshold
-    | NoItem(_) => queueItem->queueItemBlockNumber > highestBlockBelowThreshold
-    }
+    queueItem->queueItemBlockNumber > highestBlockBelowThreshold
   }
 }
 
package/src/Persistence.res
CHANGED
@@ -47,7 +47,6 @@ type t = {
   mutable storageStatus: storageStatus,
   storage: storage,
   onStorageInitialize: option<unit => promise<unit>>,
-  cacheStorage: storage,
 }
 
 let entityHistoryActionEnumConfig: Internal.enumConfig<EntityHistory.RowAction.t> = {
@@ -64,7 +63,6 @@ let make = (
   ~allEnums,
   ~staticTables,
   ~storage,
-  ~cacheStorage,
   ~onStorageInitialize=?,
 ) => {
   let allEntities = userEntities->Js.Array2.concat([dcRegistryEntityConfig])
@@ -78,7 +76,6 @@
     storageStatus: Unknown,
     storage,
     onStorageInitialize,
-    cacheStorage,
   }
 }
 
package/src/Persistence.res.js
CHANGED
@@ -22,7 +22,7 @@ var entityHistoryActionEnumConfig = {
   default: "SET"
 };
 
-function make(userEntities, dcRegistryEntityConfig, allEnums, staticTables, storage, cacheStorage, onStorageInitialize) {
+function make(userEntities, dcRegistryEntityConfig, allEnums, staticTables, storage, onStorageInitialize) {
   var allEntities = userEntities.concat([dcRegistryEntityConfig]);
   var allEnums$1 = allEnums.concat([entityHistoryActionEnumConfig]);
   return {
@@ -32,8 +32,7 @@ function make(userEntities, dcRegistryEntityConfig, allEnums, staticTables, stor
           allEnums: allEnums$1,
           storageStatus: "Unknown",
           storage: storage,
-          onStorageInitialize: onStorageInitialize,
-          cacheStorage: cacheStorage
+          onStorageInitialize: onStorageInitialize
         };
 }
 
package/src/PgStorage.res
CHANGED
@@ -314,47 +314,30 @@ let setOrThrow = async (sql, ~items, ~table: Table.table, ~itemSchema, ~pgSchema
   let sqlQuery = query["sql"]
 
   try {
-    let payload =
-      query["convertOrThrow"](items->(Utils.magic: array<'item> => array<unknown>))->(
-        Utils.magic: unknown => array<unknown>
-      )
-
     if query["isInsertValues"] {
-      let fieldsCount = switch itemSchema->S.classify {
-      | S.Object({items}) => items->Array.length
-      | _ => Js.Exn.raiseError("Expected an object schema for table")
-      }
-
-      // Chunk the items for VALUES-based queries
-      // We need to multiply by fields count,
-      // because we flattened our entity values with S.unnest
-      // to optimize the query execution.
-      let maxChunkSize = maxItemsPerQuery * fieldsCount
-      let chunks = chunkArray(payload, ~chunkSize=maxChunkSize)
+      let chunks = chunkArray(items, ~chunkSize=maxItemsPerQuery)
       let responses = []
       chunks->Js.Array2.forEach(chunk => {
         let chunkSize = chunk->Array.length
-        let isFullChunk = chunkSize === maxChunkSize
+        let isFullChunk = chunkSize === maxItemsPerQuery
 
         let response = sql->Postgres.preparedUnsafe(
           // Either use the sql query for full chunks from cache
           // or create a new one for partial chunks on the fly.
           isFullChunk
             ? sqlQuery
-            : makeInsertValuesSetSql(
-                ~pgSchema,
-                ~table,
-                ~itemSchema,
-                ~itemsCount=chunkSize / fieldsCount,
-              ),
-          chunk->Utils.magic,
+            : makeInsertValuesSetSql(~pgSchema, ~table, ~itemSchema, ~itemsCount=chunkSize),
+          query["convertOrThrow"](chunk->(Utils.magic: array<'item> => array<unknown>)),
         )
         responses->Js.Array2.push(response)->ignore
       })
       let _ = await Promise.all(responses)
     } else {
       // Use UNNEST approach for single query
-      await sql->Postgres.preparedUnsafe(sqlQuery, payload->Utils.magic)
+      await sql->Postgres.preparedUnsafe(
+        sqlQuery,
+        query["convertOrThrow"](items->(Utils.magic: array<'item> => array<unknown>)),
+      )
     }
   } catch {
   | S.Raised(_) as exn =>
package/src/PgStorage.res.js
CHANGED
@@ -4,10 +4,8 @@
 var $$Array = require("rescript/lib/js/array.js");
 var Table = require("./db/Table.res.js");
 var Utils = require("./Utils.res.js");
-var Js_exn = require("rescript/lib/js/js_exn.js");
 var Schema = require("./db/Schema.res.js");
 var Belt_Array = require("rescript/lib/js/belt_Array.js");
-var Caml_int32 = require("rescript/lib/js/caml_int32.js");
 var Caml_option = require("rescript/lib/js/caml_option.js");
 var Persistence = require("./Persistence.res.js");
 var S$RescriptSchema = require("rescript-schema/src/S.res.js");
@@ -218,20 +216,15 @@ async function setOrThrow(sql, items, table, itemSchema, pgSchema) {
   }
   var sqlQuery = query.sql;
   try {
-    var payload = query.convertOrThrow(items);
     if (!query.isInsertValues) {
-      return await sql.unsafe(sqlQuery, payload, {prepare: true});
+      return await sql.unsafe(sqlQuery, query.convertOrThrow(items), {prepare: true});
     }
-    var match = S$RescriptSchema.classify(itemSchema);
-    var fieldsCount;
-    fieldsCount = typeof match !== "object" || match.TAG !== "object" ? Js_exn.raiseError("Expected an object schema for table") : match.items.length;
-    var maxChunkSize = Math.imul(500, fieldsCount);
-    var chunks = chunkArray(payload, maxChunkSize);
+    var chunks = chunkArray(items, 500);
     var responses = [];
     chunks.forEach(function (chunk) {
       var chunkSize = chunk.length;
-      var isFullChunk = chunkSize === maxChunkSize;
-      var response = sql.unsafe(isFullChunk ? sqlQuery : makeInsertValuesSetSql(pgSchema, table, itemSchema, Caml_int32.div(chunkSize, fieldsCount)), chunk, {prepare: true});
+      var isFullChunk = chunkSize === 500;
+      var response = sql.unsafe(isFullChunk ? sqlQuery : makeInsertValuesSetSql(pgSchema, table, itemSchema, chunkSize), query.convertOrThrow(chunk), {prepare: true});
       responses.push(response);
     });
     await Promise.all(responses);
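The comments kept in this hunk summarize the batching strategy: rows are split into chunks of at most 500, every full chunk reuses one cached prepared statement, and only a trailing partial chunk builds its VALUES query on the fly. A minimal sketch of that pattern with the porsager `postgres` client follows (the `users` table, its columns, and `DATABASE_URL` are assumptions for illustration):

// Sketch of the full-chunk/partial-chunk insert pattern, assuming the `postgres` client.
const postgres = require("postgres");
const sql = postgres(process.env.DATABASE_URL);

const CHUNK_SIZE = 500;

// One placeholder pair per row: ($1,$2),($3,$4),...
function makeValuesSql(rowCount) {
  const values = Array.from({ length: rowCount }, (_, i) =>
    `($${2 * i + 1},$${2 * i + 2})`
  ).join(",");
  return `INSERT INTO "public"."users" ("id","name") VALUES ${values}`;
}

// Built once and reused for every full chunk, so the driver can cache the prepared statement.
const fullChunkSql = makeValuesSql(CHUNK_SIZE);

async function insertAll(rows) {
  const pending = [];
  for (let i = 0; i < rows.length; i += CHUNK_SIZE) {
    const chunk = rows.slice(i, i + CHUNK_SIZE);
    const query = chunk.length === CHUNK_SIZE ? fullChunkSql : makeValuesSql(chunk.length);
    const params = chunk.flatMap((r) => [r.id, r.name]); // flatten rows into positional params
    pending.push(sql.unsafe(query, params, { prepare: true }));
  }
  await Promise.all(pending); // chunks run concurrently, as in setOrThrow above
}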
package/src/db/EntityHistory.res
CHANGED
@@ -173,8 +173,9 @@ let batchInsertRows = (self: t<'entity>, ~sql, ~rows: array<historyRow<'entity>>
 type entityInternal
 
 external castInternal: t<'entity> => t<entityInternal> = "%identity"
+external eval: string => 'a = "eval"
 
-let fromTable = (table: table, ~schema: S.t<'entity>): t<'entity> => {
+let fromTable = (table: table, ~pgSchema, ~schema: S.t<'entity>): t<'entity> => {
   let entity_history_block_timestamp = "entity_history_block_timestamp"
   let entity_history_chain_id = "entity_history_chain_id"
   let entity_history_block_number = "entity_history_block_number"
@@ -235,12 +236,10 @@ let fromTable = (table: table, ~schema: S.t<'entity>): t<'entity> => {
   let dataFieldNames = dataFields->Belt.Array.map(field => field->getFieldName)
 
   let originTableName = table.tableName
-  let originSchemaName = table.schemaName
   let historyTableName = originTableName ++ "_history"
   //ignore composite indices
   let table = mkTable(
     historyTableName,
-    ~schemaName=originSchemaName,
     ~fields=Belt.Array.concatMany([
       currentHistoryFields,
       previousHistoryFields,
@@ -251,8 +250,8 @@ let fromTable = (table: table, ~schema: S.t<'entity>): t<'entity> => {
 
   let insertFnName = `"insert_${table.tableName}"`
   let historyRowArg = "history_row"
-  let historyTablePath = `"${originSchemaName}"."${historyTableName}"`
-  let originTablePath = `"${originSchemaName}"."${originTableName}"`
+  let historyTablePath = `"${pgSchema}"."${historyTableName}"`
+  let originTablePath = `"${pgSchema}"."${originTableName}"`
 
   let previousHistoryFieldsAreNullStr =
     previousChangeFieldNames
@@ -335,7 +334,7 @@ let fromTable = (table: table, ~schema: S.t<'entity>): t<'entity> => {
   \${shouldCopyCurrentEntity});\``
 
   let insertFn: (Postgres.sql, Js.Json.t, ~shouldCopyCurrentEntity: bool) => promise<unit> =
-    insertFnString->Table.PostgresInterop.eval
+    insertFnString->eval
 
   let schema = makeHistoryRowSchema(schema)
 
package/src/db/EntityHistory.res.js
CHANGED
@@ -173,7 +173,7 @@ function batchInsertRows(self, sql, rows) {
   });
 }
 
-function fromTable(table, schema) {
+function fromTable(table, pgSchema, schema) {
   var currentChangeFieldNames = [
     "entity_history_block_timestamp",
     "entity_history_chain_id",
@@ -238,9 +238,8 @@ function fromTable(table, schema) {
     return Table.getFieldName(field);
   }));
   var originTableName = table.tableName;
-  var originSchemaName = table.schemaName;
   var historyTableName = originTableName + "_history";
-  var table$1 = Table.mkTable(historyTableName, originSchemaName, undefined, Belt_Array.concatMany([
+  var table$1 = Table.mkTable(historyTableName, undefined, Belt_Array.concatMany([
     currentHistoryFields,
     previousHistoryFields,
     dataFields,
@@ -251,8 +250,8 @@ function fromTable(table, schema) {
   ]));
   var insertFnName = "\"insert_" + table$1.tableName + "\"";
   var historyRowArg = "history_row";
-  var historyTablePath = "\"" + originSchemaName + "\".\"" + historyTableName + "\"";
-  var originTablePath = "\"" + originSchemaName + "\".\"" + originTableName + "\"";
+  var historyTablePath = "\"" + pgSchema + "\".\"" + historyTableName + "\"";
+  var originTablePath = "\"" + pgSchema + "\".\"" + originTableName + "\"";
   var previousHistoryFieldsAreNullStr = Belt_Array.map(previousChangeFieldNames, (function (fieldName) {
     return historyRowArg + "." + fieldName + " IS NULL";
   })).join(" OR ");
package/src/db/Table.res
CHANGED
@@ -89,14 +89,12 @@ let getFieldType = (field: field) => {
 
 type table = {
   tableName: string,
-  schemaName: string,
   fields: array<fieldOrDerived>,
   compositeIndices: array<array<string>>,
 }
 
-let mkTable = (tableName, ~schemaName, ~compositeIndices=[], ~fields) => {
+let mkTable = (tableName, ~compositeIndices=[], ~fields) => {
   tableName,
-  schemaName,
   fields,
   compositeIndices,
 }
@@ -299,59 +297,3 @@ let getCompositeIndices = (table): array<array<string>> => {
   ->getUnfilteredCompositeIndicesUnsafe
   ->Array.keep(ind => ind->Array.length > 1)
 }
-
-module PostgresInterop = {
-  type pgFn<'payload, 'return> = (Postgres.sql, 'payload) => promise<'return>
-  type batchSetFn<'a> = (Postgres.sql, array<'a>) => promise<unit>
-  external eval: string => 'a = "eval"
-
-  let makeBatchSetFnString = (table: table) => {
-    let fieldNamesInQuotes =
-      table->getNonDefaultFieldNames->Array.map(fieldName => `"${fieldName}"`)
-    `(sql, rows) => {
-      return sql\`
-        INSERT INTO "${table.schemaName}"."${table.tableName}"
-        \${sql(rows, ${fieldNamesInQuotes->Js.Array2.joinWith(", ")})}
-        ON CONFLICT(${table->getPrimaryKeyFieldNames->Js.Array2.joinWith(", ")}) DO UPDATE
-        SET
-        ${fieldNamesInQuotes
-      ->Array.map(fieldNameInQuotes => `${fieldNameInQuotes} = EXCLUDED.${fieldNameInQuotes}`)
-      ->Js.Array2.joinWith(", ")};\`
-    }`
-  }
-
-  let chunkBatchQuery = (
-    sql,
-    entityDataArray: array<'entity>,
-    queryToExecute: pgFn<array<'entity>, 'return>,
-    ~maxItemsPerQuery=500,
-  ): promise<array<'return>> => {
-    let responses = []
-    let i = ref(0)
-    let shouldContinue = () => i.contents < entityDataArray->Array.length
-    // Split entityDataArray into chunks of maxItemsPerQuery
-    while shouldContinue() {
-      let chunk =
-        entityDataArray->Js.Array2.slice(~start=i.contents, ~end_=i.contents + maxItemsPerQuery)
-      let response = queryToExecute(sql, chunk)
-      responses->Js.Array2.push(response)->ignore
-      i := i.contents + maxItemsPerQuery
-    }
-    Promise.all(responses)
-  }
-
-  let makeBatchSetFn = (~table, ~schema: S.t<'a>): batchSetFn<'a> => {
-    let batchSetFn: pgFn<array<Js.Json.t>, unit> = table->makeBatchSetFnString->eval
-    let parseOrThrow = S.compile(
-      S.array(schema),
-      ~input=Value,
-      ~output=Json,
-      ~mode=Sync,
-      ~typeValidation=true,
-    )
-    async (sql, rows) => {
-      let rowsJson = rows->parseOrThrow->(Utils.magic: Js.Json.t => array<Js.Json.t>)
-      let _res = await chunkBatchQuery(sql, rowsJson, batchSetFn)
-    }
-  }
-}
package/src/db/Table.res.js
CHANGED
@@ -72,11 +72,10 @@ function getFieldType(field) {
   );
 }
 
-function mkTable(tableName, schemaName, compositeIndicesOpt, fields) {
+function mkTable(tableName, compositeIndicesOpt, fields) {
   var compositeIndices = compositeIndicesOpt !== undefined ? compositeIndicesOpt : [];
   return {
     tableName: tableName,
-    schemaName: schemaName,
     fields: fields,
     compositeIndices: compositeIndices
   };
@@ -302,43 +301,6 @@ function getCompositeIndices(table) {
   }));
 }
 
-function makeBatchSetFnString(table) {
-  var fieldNamesInQuotes = Belt_Array.map(getNonDefaultFieldNames(table), (function (fieldName) {
-    return "\"" + fieldName + "\"";
-  }));
-  return "(sql, rows) => {\n      return sql\`\n        INSERT INTO \"" + table.schemaName + "\".\"" + table.tableName + "\"\n        \${sql(rows, " + fieldNamesInQuotes.join(", ") + ")}\n        ON CONFLICT(" + getPrimaryKeyFieldNames(table).join(", ") + ") DO UPDATE\n        SET\n        " + Belt_Array.map(fieldNamesInQuotes, (function (fieldNameInQuotes) {
-    return fieldNameInQuotes + " = EXCLUDED." + fieldNameInQuotes;
-  })).join(", ") + ";\`\n    }";
-}
-
-function chunkBatchQuery(sql, entityDataArray, queryToExecute, maxItemsPerQueryOpt) {
-  var maxItemsPerQuery = maxItemsPerQueryOpt !== undefined ? maxItemsPerQueryOpt : 500;
-  var responses = [];
-  var i = 0;
-  while(i < entityDataArray.length) {
-    var chunk = entityDataArray.slice(i, i + maxItemsPerQuery | 0);
-    var response = queryToExecute(sql, chunk);
-    responses.push(response);
-    i = i + maxItemsPerQuery | 0;
-  };
-  return Promise.all(responses);
-}
-
-function makeBatchSetFn(table, schema) {
-  var batchSetFn = eval(makeBatchSetFnString(table));
-  var parseOrThrow = S$RescriptSchema.compile(S$RescriptSchema.array(schema), "Output", "Json", "Sync", true);
-  return async function (sql, rows) {
-    var rowsJson = parseOrThrow(rows);
-    await chunkBatchQuery(sql, rowsJson, batchSetFn, undefined);
-  };
-}
-
-var PostgresInterop = {
-  makeBatchSetFnString: makeBatchSetFnString,
-  chunkBatchQuery: chunkBatchQuery,
-  makeBatchSetFn: makeBatchSetFn
-};
-
 exports.mkField = mkField;
 exports.mkDerivedFromField = mkDerivedFromField;
 exports.getUserDefinedFieldName = getUserDefinedFieldName;
@@ -361,5 +323,4 @@ exports.getUnfilteredCompositeIndicesUnsafe = getUnfilteredCompositeIndicesUnsaf
 exports.toSqlParams = toSqlParams;
 exports.getSingleIndices = getSingleIndices;
 exports.getCompositeIndices = getCompositeIndices;
-exports.PostgresInterop = PostgresInterop;
 /* BigInt Not a pure module */