envio 2.31.0-alpha.3 → 2.31.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +5 -5
- package/src/Envio.res +1 -2
- package/src/Envio.res.js +1 -3
- package/src/FetchState.res +16 -7
- package/src/FetchState.res.js +12 -5
- package/src/Internal.res +6 -3
- package/src/Internal.res.js +13 -2
- package/src/Persistence.res +8 -2
- package/src/Persistence.res.js +2 -2
- package/src/PgStorage.res +17 -6
- package/src/PgStorage.res.js +16 -10
- package/src/Prometheus.res +23 -24
- package/src/Prometheus.res.js +55 -52
- package/src/SafeCheckpointTracking.res +22 -23
- package/src/SafeCheckpointTracking.res.js +7 -3
- package/src/db/EntityHistory.res +2 -3
- package/src/db/EntityHistory.res.js +4 -1
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "envio",
|
|
3
|
-
"version": "v2.31.0-alpha.3",
|
|
3
|
+
"version": "v2.31.0",
|
|
4
4
|
"description": "A latency and sync speed optimized, developer friendly blockchain data indexer.",
|
|
5
5
|
"bin": "./bin.js",
|
|
6
6
|
"main": "./index.js",
|
|
@@ -25,10 +25,10 @@
|
|
|
25
25
|
},
|
|
26
26
|
"homepage": "https://envio.dev",
|
|
27
27
|
"optionalDependencies": {
|
|
28
|
-
"envio-linux-x64": "v2.31.0-alpha.3",
|
|
29
|
-
"envio-linux-arm64": "v2.31.0-alpha.3",
|
|
30
|
-
"envio-darwin-x64": "v2.31.0-alpha.3",
|
|
31
|
-
"envio-darwin-arm64": "v2.31.0-alpha.3"
|
|
28
|
+
"envio-linux-x64": "v2.31.0",
|
|
29
|
+
"envio-linux-arm64": "v2.31.0",
|
|
30
|
+
"envio-darwin-x64": "v2.31.0",
|
|
31
|
+
"envio-darwin-arm64": "v2.31.0"
|
|
32
32
|
},
|
|
33
33
|
"dependencies": {
|
|
34
34
|
"@envio-dev/hypersync-client": "0.6.6",
|
package/src/Envio.res
CHANGED
|
@@ -59,7 +59,6 @@ let experimental_createEffect = (
|
|
|
59
59
|
options: effectOptions<'input, 'output>,
|
|
60
60
|
handler: effectArgs<'input> => promise<'output>,
|
|
61
61
|
) => {
|
|
62
|
-
Prometheus.EffectCallsCount.set(~callsCount=0, ~effectName=options.name)
|
|
63
62
|
let outputSchema =
|
|
64
63
|
S.schema(_ => options.output)->(Utils.magic: S.t<S.t<'output>> => S.t<Internal.effectOutput>)
|
|
65
64
|
{
|
|
@@ -86,7 +85,7 @@ let experimental_createEffect = (
|
|
|
86
85
|
})
|
|
87
86
|
Some({
|
|
88
87
|
table: Internal.makeCacheTable(~effectName=options.name),
|
|
89
|
-
|
|
88
|
+
outputSchema,
|
|
90
89
|
itemSchema,
|
|
91
90
|
})
|
|
92
91
|
| None
|
package/src/Envio.res.js
CHANGED
|
@@ -2,11 +2,9 @@
|
|
|
2
2
|
'use strict';
|
|
3
3
|
|
|
4
4
|
var Internal = require("./Internal.res.js");
|
|
5
|
-
var Prometheus = require("./Prometheus.res.js");
|
|
6
5
|
var S$RescriptSchema = require("rescript-schema/src/S.res.js");
|
|
7
6
|
|
|
8
7
|
function experimental_createEffect(options, handler) {
|
|
9
|
-
Prometheus.EffectCallsCount.set(0, options.name);
|
|
10
8
|
var outputSchema = S$RescriptSchema.schema(function (param) {
|
|
11
9
|
return options.output;
|
|
12
10
|
});
|
|
@@ -21,7 +19,7 @@ function experimental_createEffect(options, handler) {
|
|
|
21
19
|
});
|
|
22
20
|
tmp = {
|
|
23
21
|
itemSchema: itemSchema,
|
|
24
|
-
|
|
22
|
+
outputSchema: outputSchema,
|
|
25
23
|
table: Internal.makeCacheTable(options.name)
|
|
26
24
|
};
|
|
27
25
|
} else {
|
package/src/FetchState.res
CHANGED
|
@@ -351,8 +351,11 @@ let registerDynamicContracts = (
|
|
|
351
351
|
switch item->Internal.getItemDcs {
|
|
352
352
|
| None => ()
|
|
353
353
|
| Some(dcs) =>
|
|
354
|
-
|
|
355
|
-
|
|
354
|
+
let idx = ref(0)
|
|
355
|
+
while idx.contents < dcs->Array.length {
|
|
356
|
+
let dc = dcs->Js.Array2.unsafe_get(idx.contents)
|
|
357
|
+
|
|
358
|
+
let shouldRemove = ref(false)
|
|
356
359
|
|
|
357
360
|
switch fetchState.contractConfigs->Utils.Dict.dangerouslyGetNonOption(dc.contractName) {
|
|
358
361
|
| Some({filterByAddresses}) =>
|
|
@@ -378,8 +381,7 @@ let registerDynamicContracts = (
|
|
|
378
381
|
)
|
|
379
382
|
logger->Logging.childWarn(`Skipping contract registration: Contract address is already registered at a later block number. Currently registration of the same contract address is not supported by Envio. Reach out to us if it's a problem for you.`)
|
|
380
383
|
}
|
|
381
|
-
|
|
382
|
-
let _ = dcs->Js.Array2.removeCountInPlace(~count=1, ~pos=idx)
|
|
384
|
+
shouldRemove := true
|
|
383
385
|
| None =>
|
|
384
386
|
let shouldUpdate = switch registeringContracts->Utils.Dict.dangerouslyGetNonOption(
|
|
385
387
|
dc.address->Address.toString,
|
|
@@ -401,8 +403,7 @@ let registerDynamicContracts = (
|
|
|
401
403
|
Pervasives.min(earliestRegisteringEventBlockNumber.contents, dc.startBlock)
|
|
402
404
|
registeringContracts->Js.Dict.set(dc.address->Address.toString, dc)
|
|
403
405
|
} else {
|
|
404
|
-
|
|
405
|
-
let _ = dcs->Js.Array2.removeCountInPlace(~count=1, ~pos=idx)
|
|
406
|
+
shouldRemove := true
|
|
406
407
|
}
|
|
407
408
|
}
|
|
408
409
|
| None => {
|
|
@@ -414,9 +415,17 @@ let registerDynamicContracts = (
|
|
|
414
415
|
},
|
|
415
416
|
)
|
|
416
417
|
logger->Logging.childWarn(`Skipping contract registration: Contract doesn't have any events to fetch.`)
|
|
417
|
-
|
|
418
|
+
shouldRemove := true
|
|
418
419
|
}
|
|
419
420
|
}
|
|
421
|
+
|
|
422
|
+
if shouldRemove.contents {
|
|
423
|
+
// Remove the DC from item to prevent it from saving to the db
|
|
424
|
+
let _ = dcs->Js.Array2.removeCountInPlace(~count=1, ~pos=idx.contents)
|
|
425
|
+
// Don't increment idx - next element shifted into current position
|
|
426
|
+
} else {
|
|
427
|
+
idx := idx.contents + 1
|
|
428
|
+
}
|
|
420
429
|
}
|
|
421
430
|
}
|
|
422
431
|
}
|
package/src/FetchState.res.js
CHANGED
|
@@ -237,8 +237,10 @@ function registerDynamicContracts(fetchState, items) {
|
|
|
237
237
|
var item = items[itemIdx];
|
|
238
238
|
var dcs = item.dcs;
|
|
239
239
|
if (dcs !== undefined) {
|
|
240
|
-
|
|
240
|
+
var idx = 0;
|
|
241
|
+
while(idx < dcs.length) {
|
|
241
242
|
var dc = dcs[idx];
|
|
243
|
+
var shouldRemove = false;
|
|
242
244
|
var match = fetchState.contractConfigs[dc.contractName];
|
|
243
245
|
if (match !== undefined) {
|
|
244
246
|
var existingContract = indexingContracts[dc.address];
|
|
@@ -254,7 +256,7 @@ function registerDynamicContracts(fetchState, items) {
|
|
|
254
256
|
});
|
|
255
257
|
Logging.childWarn(logger, "Skipping contract registration: Contract address is already registered at a later block number. Currently registration of the same contract address is not supported by Envio. Reach out to us if it's a problem for you.");
|
|
256
258
|
}
|
|
257
|
-
|
|
259
|
+
shouldRemove = true;
|
|
258
260
|
} else {
|
|
259
261
|
var registeringContract = registeringContracts[dc.address];
|
|
260
262
|
var shouldUpdate;
|
|
@@ -274,7 +276,7 @@ function registerDynamicContracts(fetchState, items) {
|
|
|
274
276
|
earliestRegisteringEventBlockNumber = earliestRegisteringEventBlockNumber < dc.startBlock ? earliestRegisteringEventBlockNumber : dc.startBlock;
|
|
275
277
|
registeringContracts[dc.address] = dc;
|
|
276
278
|
} else {
|
|
277
|
-
|
|
279
|
+
shouldRemove = true;
|
|
278
280
|
}
|
|
279
281
|
}
|
|
280
282
|
} else {
|
|
@@ -284,9 +286,14 @@ function registerDynamicContracts(fetchState, items) {
|
|
|
284
286
|
contractName: dc.contractName
|
|
285
287
|
});
|
|
286
288
|
Logging.childWarn(logger$1, "Skipping contract registration: Contract doesn't have any events to fetch.");
|
|
289
|
+
shouldRemove = true;
|
|
290
|
+
}
|
|
291
|
+
if (shouldRemove) {
|
|
287
292
|
dcs.splice(idx, 1);
|
|
293
|
+
} else {
|
|
294
|
+
idx = idx + 1 | 0;
|
|
288
295
|
}
|
|
289
|
-
}
|
|
296
|
+
};
|
|
290
297
|
}
|
|
291
298
|
|
|
292
299
|
}
|
|
@@ -331,7 +338,7 @@ function registerDynamicContracts(fetchState, items) {
|
|
|
331
338
|
addressesByContractName: pendingAddressesByContractName.contents
|
|
332
339
|
});
|
|
333
340
|
};
|
|
334
|
-
for(var idx$1 = 0 ,idx_finish
|
|
341
|
+
for(var idx$1 = 0 ,idx_finish = Object.keys(addressesByContractName).length; idx$1 < idx_finish; ++idx$1){
|
|
335
342
|
var contractName = Object.keys(addressesByContractName)[idx$1];
|
|
336
343
|
var addresses = addressesByContractName[contractName];
|
|
337
344
|
var contractConfig = fetchState.contractConfigs[contractName];
|
package/src/Internal.res
CHANGED
|
@@ -290,7 +290,7 @@ type effectArgs = {
|
|
|
290
290
|
type effectCacheItem = {id: string, output: effectOutput}
|
|
291
291
|
type effectCacheMeta = {
|
|
292
292
|
itemSchema: S.t<effectCacheItem>,
|
|
293
|
-
|
|
293
|
+
outputSchema: S.t<effectOutput>,
|
|
294
294
|
table: Table.table,
|
|
295
295
|
}
|
|
296
296
|
type effect = {
|
|
@@ -302,14 +302,17 @@ type effect = {
|
|
|
302
302
|
mutable callsCount: int,
|
|
303
303
|
}
|
|
304
304
|
let cacheTablePrefix = "envio_effect_"
|
|
305
|
+
let cacheOutputSchema = S.json(~validate=false)->(Utils.magic: S.t<Js.Json.t> => S.t<effectOutput>)
|
|
306
|
+
let effectCacheItemRowsSchema = S.array(
|
|
307
|
+
S.schema(s => {id: s.matches(S.string), output: s.matches(cacheOutputSchema)}),
|
|
308
|
+
)
|
|
305
309
|
let makeCacheTable = (~effectName) => {
|
|
306
310
|
Table.mkTable(
|
|
307
311
|
cacheTablePrefix ++ effectName,
|
|
308
312
|
~fields=[
|
|
309
313
|
Table.mkField("id", Text, ~fieldSchema=S.string, ~isPrimaryKey=true),
|
|
310
|
-
Table.mkField("output", JsonB, ~fieldSchema=
|
|
314
|
+
Table.mkField("output", JsonB, ~fieldSchema=cacheOutputSchema, ~isNullable=true),
|
|
311
315
|
],
|
|
312
|
-
~compositeIndices=[],
|
|
313
316
|
)
|
|
314
317
|
}
|
|
315
318
|
|
package/src/Internal.res.js
CHANGED
|
@@ -36,10 +36,19 @@ function makeEnumConfig(name, variants) {
|
|
|
36
36
|
|
|
37
37
|
var cacheTablePrefix = "envio_effect_";
|
|
38
38
|
|
|
39
|
+
var cacheOutputSchema = S$RescriptSchema.json(false);
|
|
40
|
+
|
|
41
|
+
var effectCacheItemRowsSchema = S$RescriptSchema.array(S$RescriptSchema.schema(function (s) {
|
|
42
|
+
return {
|
|
43
|
+
id: s.m(S$RescriptSchema.string),
|
|
44
|
+
output: s.m(cacheOutputSchema)
|
|
45
|
+
};
|
|
46
|
+
}));
|
|
47
|
+
|
|
39
48
|
function makeCacheTable(effectName) {
|
|
40
|
-
return Table.mkTable(cacheTablePrefix + effectName,
|
|
49
|
+
return Table.mkTable(cacheTablePrefix + effectName, undefined, [
|
|
41
50
|
Table.mkField("id", "TEXT", S$RescriptSchema.string, undefined, undefined, undefined, true, undefined, undefined),
|
|
42
|
-
Table.mkField("output", "JSONB",
|
|
51
|
+
Table.mkField("output", "JSONB", cacheOutputSchema, undefined, undefined, true, undefined, undefined, undefined)
|
|
43
52
|
]);
|
|
44
53
|
}
|
|
45
54
|
|
|
@@ -47,5 +56,7 @@ exports.fuelSupplyParamsSchema = fuelSupplyParamsSchema;
|
|
|
47
56
|
exports.fuelTransferParamsSchema = fuelTransferParamsSchema;
|
|
48
57
|
exports.makeEnumConfig = makeEnumConfig;
|
|
49
58
|
exports.cacheTablePrefix = cacheTablePrefix;
|
|
59
|
+
exports.cacheOutputSchema = cacheOutputSchema;
|
|
60
|
+
exports.effectCacheItemRowsSchema = effectCacheItemRowsSchema;
|
|
50
61
|
exports.makeCacheTable = makeCacheTable;
|
|
51
62
|
/* fuelSupplyParamsSchema Not a pure module */
|
package/src/Persistence.res
CHANGED
|
@@ -192,7 +192,12 @@ let getInitializedState = persistence => {
|
|
|
192
192
|
}
|
|
193
193
|
}
|
|
194
194
|
|
|
195
|
-
let setEffectCacheOrThrow = async (
|
|
195
|
+
let setEffectCacheOrThrow = async (
|
|
196
|
+
persistence,
|
|
197
|
+
~effect: Internal.effect,
|
|
198
|
+
~items,
|
|
199
|
+
~invalidationsCount,
|
|
200
|
+
) => {
|
|
196
201
|
switch persistence.storageStatus {
|
|
197
202
|
| Unknown
|
|
198
203
|
| Initializing(_) =>
|
|
@@ -210,7 +215,8 @@ let setEffectCacheOrThrow = async (persistence, ~effect: Internal.effect, ~items
|
|
|
210
215
|
}
|
|
211
216
|
let initialize = effectCacheRecord.count === 0
|
|
212
217
|
await storage.setEffectCacheOrThrow(~effect, ~items, ~initialize)
|
|
213
|
-
effectCacheRecord.count = effectCacheRecord.count + items->Js.Array2.length
|
|
218
|
+
effectCacheRecord.count =
|
|
219
|
+
effectCacheRecord.count + items->Js.Array2.length - invalidationsCount
|
|
214
220
|
Prometheus.EffectCacheCount.set(~count=effectCacheRecord.count, ~effectName)
|
|
215
221
|
}
|
|
216
222
|
}
|
package/src/Persistence.res.js
CHANGED
|
@@ -119,7 +119,7 @@ function getInitializedState(persistence) {
|
|
|
119
119
|
}
|
|
120
120
|
}
|
|
121
121
|
|
|
122
|
-
async function setEffectCacheOrThrow(persistence, effect, items) {
|
|
122
|
+
async function setEffectCacheOrThrow(persistence, effect, items, invalidationsCount) {
|
|
123
123
|
var match = persistence.storageStatus;
|
|
124
124
|
if (typeof match !== "object") {
|
|
125
125
|
return Js_exn.raiseError("Failed to access the indexer storage. The Persistence layer is not initialized.");
|
|
@@ -144,7 +144,7 @@ async function setEffectCacheOrThrow(persistence, effect, items) {
|
|
|
144
144
|
}
|
|
145
145
|
var initialize = effectCacheRecord.count === 0;
|
|
146
146
|
await storage.setEffectCacheOrThrow(effect, items, initialize);
|
|
147
|
-
effectCacheRecord.count = effectCacheRecord.count + items.length | 0;
|
|
147
|
+
effectCacheRecord.count = (effectCacheRecord.count + items.length | 0) - invalidationsCount | 0;
|
|
148
148
|
return Prometheus.EffectCacheCount.set(effectCacheRecord.count, effectName);
|
|
149
149
|
}
|
|
150
150
|
|
package/src/PgStorage.res
CHANGED
|
@@ -22,7 +22,7 @@ let makeCreateTableIndicesQuery = (table: Table.table, ~pgSchema) => {
|
|
|
22
22
|
compositeIndices->Array.map(createCompositeIndex)->Js.Array2.joinWith("\n")
|
|
23
23
|
}
|
|
24
24
|
|
|
25
|
-
let makeCreateTableQuery = (table: Table.table, ~pgSchema) => {
|
|
25
|
+
let makeCreateTableQuery = (table: Table.table, ~pgSchema, ~isNumericArrayAsText) => {
|
|
26
26
|
open Belt
|
|
27
27
|
let fieldsMapped =
|
|
28
28
|
table
|
|
@@ -34,6 +34,8 @@ let makeCreateTableQuery = (table: Table.table, ~pgSchema) => {
|
|
|
34
34
|
{
|
|
35
35
|
`"${fieldName}" ${switch fieldType {
|
|
36
36
|
| Custom(name) if !(name->Js.String2.startsWith("NUMERIC(")) => `"${pgSchema}".${name}`
|
|
37
|
+
// Workaround for Hasura bug https://github.com/enviodev/hyperindex/issues/788
|
|
38
|
+
| Numeric if isArray && isNumericArrayAsText => (Table.Text :> string)
|
|
37
39
|
| _ => (fieldType :> string)
|
|
38
40
|
}}${isArray ? "[]" : ""}${switch defaultValue {
|
|
39
41
|
| Some(defaultValue) => ` DEFAULT ${defaultValue}`
|
|
@@ -57,6 +59,7 @@ let makeCreateTableQuery = (table: Table.table, ~pgSchema) => {
|
|
|
57
59
|
let makeInitializeTransaction = (
|
|
58
60
|
~pgSchema,
|
|
59
61
|
~pgUser,
|
|
62
|
+
~isHasuraEnabled,
|
|
60
63
|
~chainConfigs=[],
|
|
61
64
|
~entities=[],
|
|
62
65
|
~enums=[],
|
|
@@ -105,7 +108,10 @@ GRANT ALL ON SCHEMA "${pgSchema}" TO public;`,
|
|
|
105
108
|
|
|
106
109
|
// Batch all table creation first (optimal for PostgreSQL)
|
|
107
110
|
allTables->Js.Array2.forEach((table: Table.table) => {
|
|
108
|
-
query :=
|
|
111
|
+
query :=
|
|
112
|
+
query.contents ++
|
|
113
|
+
"\n" ++
|
|
114
|
+
makeCreateTableQuery(table, ~pgSchema, ~isNumericArrayAsText=isHasuraEnabled)
|
|
109
115
|
})
|
|
110
116
|
|
|
111
117
|
// Then batch all indices (better performance when tables exist)
|
|
@@ -263,7 +269,7 @@ let makeTableBatchSetQuery = (~pgSchema, ~table: Table.table, ~itemSchema: S.t<'
|
|
|
263
269
|
// Currently history update table uses S.object with transformation for schema,
|
|
264
270
|
// which is being lossed during conversion to dbSchema.
|
|
265
271
|
// So use simple insert values for now.
|
|
266
|
-
let isHistoryUpdate = table.tableName->Js.String2.startsWith(
|
|
272
|
+
let isHistoryUpdate = table.tableName->Js.String2.startsWith(EntityHistory.historyTablePrefix)
|
|
267
273
|
|
|
268
274
|
// Should experiment how much it'll affect performance
|
|
269
275
|
// Although, it should be fine not to perform the validation check,
|
|
@@ -329,7 +335,7 @@ let removeInvalidUtf8InPlace = entities =>
|
|
|
329
335
|
// This is unsafe, but we rely that it'll use
|
|
330
336
|
// the mutated reference on retry.
|
|
331
337
|
// TODO: Test it properly after we start using
|
|
332
|
-
//
|
|
338
|
+
// real pg for indexer test framework.
|
|
333
339
|
dict->Js.Dict.set(
|
|
334
340
|
key,
|
|
335
341
|
value
|
|
@@ -507,6 +513,7 @@ let make = (
|
|
|
507
513
|
~pgUser,
|
|
508
514
|
~pgDatabase,
|
|
509
515
|
~pgPassword,
|
|
516
|
+
~isHasuraEnabled,
|
|
510
517
|
~onInitialize=?,
|
|
511
518
|
~onNewTables=?,
|
|
512
519
|
): Persistence.storage => {
|
|
@@ -552,7 +559,7 @@ let make = (
|
|
|
552
559
|
let table = Internal.makeCacheTable(~effectName)
|
|
553
560
|
|
|
554
561
|
sql
|
|
555
|
-
->Postgres.unsafe(makeCreateTableQuery(table, ~pgSchema))
|
|
562
|
+
->Postgres.unsafe(makeCreateTableQuery(table, ~pgSchema, ~isNumericArrayAsText=false))
|
|
556
563
|
->Promise.then(() => {
|
|
557
564
|
let inputFile = NodeJs.Path.join(cacheDirPath, entry)->NodeJs.Path.toString
|
|
558
565
|
|
|
@@ -645,6 +652,7 @@ let make = (
|
|
|
645
652
|
~enums,
|
|
646
653
|
~chainConfigs,
|
|
647
654
|
~isEmptyPgSchema=schemaTableNames->Utils.Array.isEmpty,
|
|
655
|
+
~isHasuraEnabled,
|
|
648
656
|
)
|
|
649
657
|
// Execute all queries within a single transaction for integrity
|
|
650
658
|
let _ = await sql->Postgres.beginSql(sql => {
|
|
@@ -790,7 +798,10 @@ let make = (
|
|
|
790
798
|
}
|
|
791
799
|
|
|
792
800
|
if initialize {
|
|
793
|
-
let _ =
|
|
801
|
+
let _ =
|
|
802
|
+
await sql->Postgres.unsafe(
|
|
803
|
+
makeCreateTableQuery(table, ~pgSchema, ~isNumericArrayAsText=false),
|
|
804
|
+
)
|
|
794
805
|
// Integration with other tools like Hasura
|
|
795
806
|
switch onNewTables {
|
|
796
807
|
| Some(onNewTables) => await onNewTables(~tableNames=[table.tableName])
|
package/src/PgStorage.res.js
CHANGED
|
@@ -15,6 +15,7 @@ var Internal = require("./Internal.res.js");
|
|
|
15
15
|
var Belt_Array = require("rescript/lib/js/belt_Array.js");
|
|
16
16
|
var Caml_option = require("rescript/lib/js/caml_option.js");
|
|
17
17
|
var Persistence = require("./Persistence.res.js");
|
|
18
|
+
var EntityHistory = require("./db/EntityHistory.res.js");
|
|
18
19
|
var InternalTable = require("./db/InternalTable.res.js");
|
|
19
20
|
var Child_process = require("child_process");
|
|
20
21
|
var Caml_exceptions = require("rescript/lib/js/caml_exceptions.js");
|
|
@@ -44,15 +45,20 @@ function makeCreateTableIndicesQuery(table, pgSchema) {
|
|
|
44
45
|
return Belt_Array.map(singleIndices, createIndex).join("\n") + Belt_Array.map(compositeIndices, createCompositeIndex).join("\n");
|
|
45
46
|
}
|
|
46
47
|
|
|
47
|
-
function makeCreateTableQuery(table, pgSchema) {
|
|
48
|
+
function makeCreateTableQuery(table, pgSchema, isNumericArrayAsText) {
|
|
48
49
|
var fieldsMapped = Belt_Array.map(Table.getFields(table), (function (field) {
|
|
49
50
|
var defaultValue = field.defaultValue;
|
|
51
|
+
var isArray = field.isArray;
|
|
50
52
|
var fieldType = field.fieldType;
|
|
51
53
|
var fieldName = Table.getDbFieldName(field);
|
|
52
54
|
var tmp;
|
|
53
|
-
tmp = fieldType === "TIMESTAMP" || fieldType === "TIMESTAMP WITH TIME ZONE" || fieldType === "JSONB" || fieldType === "SERIAL" || fieldType === "TEXT" || fieldType === "DOUBLE PRECISION" || fieldType === "NUMERIC" || fieldType === "BOOLEAN" || fieldType === "BIGINT" || fieldType === "INTEGER" || fieldType === "TIMESTAMP WITH TIME ZONE NULL"
|
|
55
|
+
tmp = fieldType === "TIMESTAMP" || fieldType === "TIMESTAMP WITH TIME ZONE" || fieldType === "JSONB" || fieldType === "SERIAL" || fieldType === "TEXT" || fieldType === "DOUBLE PRECISION" || fieldType === "NUMERIC" || fieldType === "BOOLEAN" || fieldType === "BIGINT" || fieldType === "INTEGER" || fieldType === "TIMESTAMP WITH TIME ZONE NULL" ? (
|
|
56
|
+
fieldType === "NUMERIC" && isArray && isNumericArrayAsText ? "TEXT" : fieldType
|
|
57
|
+
) : (
|
|
58
|
+
fieldType.startsWith("NUMERIC(") ? fieldType : "\"" + pgSchema + "\"." + fieldType
|
|
59
|
+
);
|
|
54
60
|
return "\"" + fieldName + "\" " + tmp + (
|
|
55
|
-
|
|
61
|
+
isArray ? "[]" : ""
|
|
56
62
|
) + (
|
|
57
63
|
defaultValue !== undefined ? " DEFAULT " + defaultValue : (
|
|
58
64
|
field.isNullable ? "" : " NOT NULL"
|
|
@@ -68,7 +74,7 @@ function makeCreateTableQuery(table, pgSchema) {
|
|
|
68
74
|
) + ");";
|
|
69
75
|
}
|
|
70
76
|
|
|
71
|
-
function makeInitializeTransaction(pgSchema, pgUser, chainConfigsOpt, entitiesOpt, enumsOpt, isEmptyPgSchemaOpt) {
|
|
77
|
+
function makeInitializeTransaction(pgSchema, pgUser, isHasuraEnabled, chainConfigsOpt, entitiesOpt, enumsOpt, isEmptyPgSchemaOpt) {
|
|
72
78
|
var chainConfigs = chainConfigsOpt !== undefined ? chainConfigsOpt : [];
|
|
73
79
|
var entities = entitiesOpt !== undefined ? entitiesOpt : [];
|
|
74
80
|
var enums = enumsOpt !== undefined ? enumsOpt : [];
|
|
@@ -99,7 +105,7 @@ function makeInitializeTransaction(pgSchema, pgUser, chainConfigsOpt, entitiesOp
|
|
|
99
105
|
query.contents = query.contents + "\n" + enumCreateQuery;
|
|
100
106
|
});
|
|
101
107
|
allTables.forEach(function (table) {
|
|
102
|
-
query.contents = query.contents + "\n" + makeCreateTableQuery(table, pgSchema);
|
|
108
|
+
query.contents = query.contents + "\n" + makeCreateTableQuery(table, pgSchema, isHasuraEnabled);
|
|
103
109
|
});
|
|
104
110
|
allTables.forEach(function (table) {
|
|
105
111
|
var indices = makeCreateTableIndicesQuery(table, pgSchema);
|
|
@@ -194,7 +200,7 @@ function makeInsertValuesSetQuery(pgSchema, table, itemSchema, itemsCount) {
|
|
|
194
200
|
function makeTableBatchSetQuery(pgSchema, table, itemSchema) {
|
|
195
201
|
var match = Table.toSqlParams(table, itemSchema, pgSchema);
|
|
196
202
|
var isRawEvents = table.tableName === InternalTable.RawEvents.table.tableName;
|
|
197
|
-
var isHistoryUpdate = table.tableName.startsWith(
|
|
203
|
+
var isHistoryUpdate = table.tableName.startsWith(EntityHistory.historyTablePrefix);
|
|
198
204
|
if ((isRawEvents || !match.hasArrayField) && !isHistoryUpdate) {
|
|
199
205
|
return {
|
|
200
206
|
query: makeInsertUnnestSetQuery(pgSchema, table, itemSchema, isRawEvents),
|
|
@@ -354,7 +360,7 @@ async function getConnectedPsqlExec(pgUser, pgHost, pgDatabase, pgPort) {
|
|
|
354
360
|
return result;
|
|
355
361
|
}
|
|
356
362
|
|
|
357
|
-
function make(sql, pgHost, pgSchema, pgPort, pgUser, pgDatabase, pgPassword, onInitialize, onNewTables) {
|
|
363
|
+
function make(sql, pgHost, pgSchema, pgPort, pgUser, pgDatabase, pgPassword, isHasuraEnabled, onInitialize, onNewTables) {
|
|
358
364
|
var psqlExecOptions_env = Js_dict.fromArray([
|
|
359
365
|
[
|
|
360
366
|
"PGPASSWORD",
|
|
@@ -403,7 +409,7 @@ function make(sql, pgHost, pgSchema, pgPort, pgUser, pgDatabase, pgPassword, onI
|
|
|
403
409
|
await Promise.all(cacheFiles.map(function (entry) {
|
|
404
410
|
var effectName = entry.slice(0, -4);
|
|
405
411
|
var table = Internal.makeCacheTable(effectName);
|
|
406
|
-
return sql.unsafe(makeCreateTableQuery(table, pgSchema)).then(function () {
|
|
412
|
+
return sql.unsafe(makeCreateTableQuery(table, pgSchema, false)).then(function () {
|
|
407
413
|
var inputFile = Path.join(cacheDirPath, entry);
|
|
408
414
|
var command = psqlExec$1 + " -c 'COPY \"" + pgSchema + "\".\"" + table.tableName + "\" FROM STDIN WITH (FORMAT text, HEADER);' < " + inputFile;
|
|
409
415
|
return new Promise((function (resolve, reject) {
|
|
@@ -461,7 +467,7 @@ function make(sql, pgHost, pgSchema, pgPort, pgUser, pgDatabase, pgPassword, onI
|
|
|
461
467
|
})) {
|
|
462
468
|
Js_exn.raiseError("Cannot run Envio migrations on PostgreSQL schema \"" + pgSchema + "\" because it contains non-Envio tables. Running migrations would delete all data in this schema.\n\nTo resolve this:\n1. If you want to use this schema, first backup any important data, then drop it with: \"pnpm envio local db-migrate down\"\n2. Or specify a different schema name by setting the \"ENVIO_PG_PUBLIC_SCHEMA\" environment variable\n3. Or manually drop the schema in your database if you're certain the data is not needed.");
|
|
463
469
|
}
|
|
464
|
-
var queries = makeInitializeTransaction(pgSchema, pgUser, chainConfigs, entities, enums, Utils.$$Array.isEmpty(schemaTableNames));
|
|
470
|
+
var queries = makeInitializeTransaction(pgSchema, pgUser, isHasuraEnabled, chainConfigs, entities, enums, Utils.$$Array.isEmpty(schemaTableNames));
|
|
465
471
|
await sql.begin(function (sql) {
|
|
466
472
|
return Promise.all(queries.map(function (query) {
|
|
467
473
|
return sql.unsafe(query);
|
|
@@ -568,7 +574,7 @@ function make(sql, pgHost, pgSchema, pgPort, pgUser, pgDatabase, pgPassword, onI
|
|
|
568
574
|
var match = cacheMeta !== undefined ? cacheMeta : Js_exn.raiseError("Failed to set effect cache for \"" + effect.name + "\". Effect has no cache enabled.");
|
|
569
575
|
var table = match.table;
|
|
570
576
|
if (initialize) {
|
|
571
|
-
await sql.unsafe(makeCreateTableQuery(table, pgSchema));
|
|
577
|
+
await sql.unsafe(makeCreateTableQuery(table, pgSchema, false));
|
|
572
578
|
if (onNewTables !== undefined) {
|
|
573
579
|
await onNewTables([table.tableName]);
|
|
574
580
|
}
|
package/src/Prometheus.res
CHANGED
|
@@ -525,30 +525,6 @@ module RollbackTargetBlockNumber = {
|
|
|
525
525
|
}
|
|
526
526
|
}
|
|
527
527
|
|
|
528
|
-
module ProcessingBlockNumber = {
|
|
529
|
-
let gauge = SafeGauge.makeOrThrow(
|
|
530
|
-
~name="envio_processing_block_number",
|
|
531
|
-
~help="The latest item block number included in the currently processing batch for the chain.",
|
|
532
|
-
~labelSchema=chainIdLabelsSchema,
|
|
533
|
-
)
|
|
534
|
-
|
|
535
|
-
let set = (~blockNumber, ~chainId) => {
|
|
536
|
-
gauge->SafeGauge.handleInt(~labels=chainId, ~value=blockNumber)
|
|
537
|
-
}
|
|
538
|
-
}
|
|
539
|
-
|
|
540
|
-
module ProcessingBatchSize = {
|
|
541
|
-
let gauge = SafeGauge.makeOrThrow(
|
|
542
|
-
~name="envio_processing_batch_size",
|
|
543
|
-
~help="The number of items included in the currently processing batch for the chain.",
|
|
544
|
-
~labelSchema=chainIdLabelsSchema,
|
|
545
|
-
)
|
|
546
|
-
|
|
547
|
-
let set = (~batchSize, ~chainId) => {
|
|
548
|
-
gauge->SafeGauge.handleInt(~labels=chainId, ~value=batchSize)
|
|
549
|
-
}
|
|
550
|
-
}
|
|
551
|
-
|
|
552
528
|
module ProcessingMaxBatchSize = {
|
|
553
529
|
let gauge = PromClient.Gauge.makeGauge({
|
|
554
530
|
"name": "envio_processing_max_batch_size",
|
|
@@ -593,6 +569,17 @@ module ProgressEventsCount = {
|
|
|
593
569
|
}
|
|
594
570
|
}
|
|
595
571
|
|
|
572
|
+
module ProgressBatchCount = {
|
|
573
|
+
let counter = PromClient.Counter.makeCounter({
|
|
574
|
+
"name": "envio_progress_batches_count",
|
|
575
|
+
"help": "The number of batches processed and reflected in the database.",
|
|
576
|
+
})
|
|
577
|
+
|
|
578
|
+
let increment = () => {
|
|
579
|
+
counter->PromClient.Counter.inc
|
|
580
|
+
}
|
|
581
|
+
}
|
|
582
|
+
|
|
596
583
|
let effectLabelsSchema = S.object(s => {
|
|
597
584
|
s.field("effect", S.string)
|
|
598
585
|
})
|
|
@@ -621,6 +608,18 @@ module EffectCacheCount = {
|
|
|
621
608
|
}
|
|
622
609
|
}
|
|
623
610
|
|
|
611
|
+
module EffectCacheInvalidationsCount = {
|
|
612
|
+
let counter = SafeCounter.makeOrThrow(
|
|
613
|
+
~name="envio_effect_cache_invalidations_count",
|
|
614
|
+
~help="The number of effect cache invalidations.",
|
|
615
|
+
~labelSchema=effectLabelsSchema,
|
|
616
|
+
)
|
|
617
|
+
|
|
618
|
+
let increment = (~effectName) => {
|
|
619
|
+
counter->SafeCounter.increment(~labels=effectName)
|
|
620
|
+
}
|
|
621
|
+
}
|
|
622
|
+
|
|
624
623
|
module StorageLoad = {
|
|
625
624
|
let operationLabelsSchema = S.object(s => s.field("operation", S.string))
|
|
626
625
|
|
package/src/Prometheus.res.js
CHANGED
|
@@ -628,51 +628,29 @@ var RollbackTargetBlockNumber = {
|
|
|
628
628
|
set: set$15
|
|
629
629
|
};
|
|
630
630
|
|
|
631
|
-
var gauge$17 = makeOrThrow$1("envio_processing_block_number", "The latest item block number included in the currently processing batch for the chain.", chainIdLabelsSchema);
|
|
632
|
-
|
|
633
|
-
function set$16(blockNumber, chainId) {
|
|
634
|
-
handleInt$1(gauge$17, chainId, blockNumber);
|
|
635
|
-
}
|
|
636
|
-
|
|
637
|
-
var ProcessingBlockNumber = {
|
|
638
|
-
gauge: gauge$17,
|
|
639
|
-
set: set$16
|
|
640
|
-
};
|
|
641
|
-
|
|
642
|
-
var gauge$18 = makeOrThrow$1("envio_processing_batch_size", "The number of items included in the currently processing batch for the chain.", chainIdLabelsSchema);
|
|
643
|
-
|
|
644
|
-
function set$17(batchSize, chainId) {
|
|
645
|
-
handleInt$1(gauge$18, chainId, batchSize);
|
|
646
|
-
}
|
|
647
|
-
|
|
648
|
-
var ProcessingBatchSize = {
|
|
649
|
-
gauge: gauge$18,
|
|
650
|
-
set: set$17
|
|
651
|
-
};
|
|
652
|
-
|
|
653
|
-
var gauge$19 = new PromClient.Gauge({
|
|
631
|
+
var gauge$17 = new PromClient.Gauge({
|
|
654
632
|
name: "envio_processing_max_batch_size",
|
|
655
633
|
help: "The maximum number of items to process in a single batch."
|
|
656
634
|
});
|
|
657
635
|
|
|
658
|
-
function set$
|
|
659
|
-
gauge$
|
|
636
|
+
function set$16(maxBatchSize) {
|
|
637
|
+
gauge$17.set(maxBatchSize);
|
|
660
638
|
}
|
|
661
639
|
|
|
662
640
|
var ProcessingMaxBatchSize = {
|
|
663
|
-
gauge: gauge$
|
|
664
|
-
set: set$
|
|
641
|
+
gauge: gauge$17,
|
|
642
|
+
set: set$16
|
|
665
643
|
};
|
|
666
644
|
|
|
667
|
-
var gauge$
|
|
645
|
+
var gauge$18 = makeOrThrow$1("envio_progress_block_number", "The block number of the latest block processed and stored in the database.", chainIdLabelsSchema);
|
|
668
646
|
|
|
669
|
-
function set$
|
|
670
|
-
handleInt$1(gauge$
|
|
647
|
+
function set$17(blockNumber, chainId) {
|
|
648
|
+
handleInt$1(gauge$18, chainId, blockNumber);
|
|
671
649
|
}
|
|
672
650
|
|
|
673
651
|
var ProgressBlockNumber = {
|
|
674
|
-
gauge: gauge$
|
|
675
|
-
set: set$
|
|
652
|
+
gauge: gauge$18,
|
|
653
|
+
set: set$17
|
|
676
654
|
};
|
|
677
655
|
|
|
678
656
|
var deprecatedGauge$1 = new PromClient.Gauge({
|
|
@@ -681,45 +659,70 @@ var deprecatedGauge$1 = new PromClient.Gauge({
|
|
|
681
659
|
labelNames: ["chainId"]
|
|
682
660
|
});
|
|
683
661
|
|
|
684
|
-
var gauge$
|
|
662
|
+
var gauge$19 = makeOrThrow$1("envio_progress_events_count", "The number of events processed and reflected in the database.", chainIdLabelsSchema);
|
|
685
663
|
|
|
686
|
-
function set$
|
|
664
|
+
function set$18(processedCount, chainId) {
|
|
687
665
|
deprecatedGauge$1.labels({
|
|
688
666
|
chainId: chainId
|
|
689
667
|
}).set(processedCount);
|
|
690
|
-
handleInt$1(gauge$
|
|
668
|
+
handleInt$1(gauge$19, chainId, processedCount);
|
|
691
669
|
}
|
|
692
670
|
|
|
693
671
|
var ProgressEventsCount = {
|
|
694
672
|
deprecatedGauge: deprecatedGauge$1,
|
|
695
|
-
gauge: gauge$
|
|
696
|
-
set: set$
|
|
673
|
+
gauge: gauge$19,
|
|
674
|
+
set: set$18
|
|
675
|
+
};
|
|
676
|
+
|
|
677
|
+
var counter$5 = new PromClient.Counter({
|
|
678
|
+
name: "envio_progress_batches_count",
|
|
679
|
+
help: "The number of batches processed and reflected in the database."
|
|
680
|
+
});
|
|
681
|
+
|
|
682
|
+
function increment$5() {
|
|
683
|
+
counter$5.inc();
|
|
684
|
+
}
|
|
685
|
+
|
|
686
|
+
var ProgressBatchCount = {
|
|
687
|
+
counter: counter$5,
|
|
688
|
+
increment: increment$5
|
|
697
689
|
};
|
|
698
690
|
|
|
699
691
|
var effectLabelsSchema = S$RescriptSchema.object(function (s) {
|
|
700
692
|
return s.f("effect", S$RescriptSchema.string);
|
|
701
693
|
});
|
|
702
694
|
|
|
703
|
-
var gauge$
|
|
695
|
+
var gauge$20 = makeOrThrow$1("envio_effect_calls_count", "The number of calls to the effect. Including both handler execution and cache hits.", effectLabelsSchema);
|
|
704
696
|
|
|
705
|
-
function set$
|
|
706
|
-
handleInt$1(gauge$
|
|
697
|
+
function set$19(callsCount, effectName) {
|
|
698
|
+
handleInt$1(gauge$20, effectName, callsCount);
|
|
707
699
|
}
|
|
708
700
|
|
|
709
701
|
var EffectCallsCount = {
|
|
710
|
-
gauge: gauge$
|
|
711
|
-
set: set$
|
|
702
|
+
gauge: gauge$20,
|
|
703
|
+
set: set$19
|
|
712
704
|
};
|
|
713
705
|
|
|
714
|
-
var gauge$
|
|
706
|
+
var gauge$21 = makeOrThrow$1("envio_effect_cache_count", "The number of items in the effect cache.", effectLabelsSchema);
|
|
715
707
|
|
|
716
|
-
function set$
|
|
717
|
-
handleInt$1(gauge$
|
|
708
|
+
function set$20(count, effectName) {
|
|
709
|
+
handleInt$1(gauge$21, effectName, count);
|
|
718
710
|
}
|
|
719
711
|
|
|
720
712
|
var EffectCacheCount = {
|
|
721
|
-
gauge: gauge$
|
|
722
|
-
set: set$
|
|
713
|
+
gauge: gauge$21,
|
|
714
|
+
set: set$20
|
|
715
|
+
};
|
|
716
|
+
|
|
717
|
+
var counter$6 = makeOrThrow("envio_effect_cache_invalidations_count", "The number of effect cache invalidations.", effectLabelsSchema);
|
|
718
|
+
|
|
719
|
+
function increment$6(effectName) {
|
|
720
|
+
increment(counter$6, effectName);
|
|
721
|
+
}
|
|
722
|
+
|
|
723
|
+
var EffectCacheInvalidationsCount = {
|
|
724
|
+
counter: counter$6,
|
|
725
|
+
increment: increment$6
|
|
723
726
|
};
|
|
724
727
|
|
|
725
728
|
var operationLabelsSchema = S$RescriptSchema.object(function (s) {
|
|
@@ -730,7 +733,7 @@ var timeCounter$2 = makeOrThrow("envio_storage_load_time", "Processing time take
|
|
|
730
733
|
|
|
731
734
|
var totalTimeCounter = makeOrThrow("envio_storage_load_total_time", "Cumulative time spent loading data from storage during the indexing process. (milliseconds)", operationLabelsSchema);
|
|
732
735
|
|
|
733
|
-
var counter$
|
|
736
|
+
var counter$7 = makeOrThrow("envio_storage_load_count", "Cumulative number of successful storage load operations during the indexing process.", operationLabelsSchema);
|
|
734
737
|
|
|
735
738
|
var whereSizeCounter = makeOrThrow("envio_storage_load_where_size", "Cumulative number of filter conditions ('where' items) used in storage load operations during the indexing process.", operationLabelsSchema);
|
|
736
739
|
|
|
@@ -759,7 +762,7 @@ function endOperation(timerRef, operation, whereSize, size) {
|
|
|
759
762
|
Utils.Dict.deleteInPlace(operations, operation);
|
|
760
763
|
}
|
|
761
764
|
handleInt(totalTimeCounter, operation, Hrtime.intFromMillis(Hrtime.toMillis(Hrtime.timeSince(timerRef))));
|
|
762
|
-
increment(counter$
|
|
765
|
+
increment(counter$7, operation);
|
|
763
766
|
handleInt(whereSizeCounter, operation, whereSize);
|
|
764
767
|
handleInt(sizeCounter, operation, size);
|
|
765
768
|
}
|
|
@@ -768,7 +771,7 @@ var StorageLoad = {
|
|
|
768
771
|
operationLabelsSchema: operationLabelsSchema,
|
|
769
772
|
timeCounter: timeCounter$2,
|
|
770
773
|
totalTimeCounter: totalTimeCounter,
|
|
771
|
-
counter: counter$
|
|
774
|
+
counter: counter$7,
|
|
772
775
|
whereSizeCounter: whereSizeCounter,
|
|
773
776
|
sizeCounter: sizeCounter,
|
|
774
777
|
operations: operations,
|
|
@@ -817,13 +820,13 @@ exports.RollbackEnabled = RollbackEnabled;
|
|
|
817
820
|
exports.RollbackSuccess = RollbackSuccess;
|
|
818
821
|
exports.RollbackHistoryPrune = RollbackHistoryPrune;
|
|
819
822
|
exports.RollbackTargetBlockNumber = RollbackTargetBlockNumber;
|
|
820
|
-
exports.ProcessingBlockNumber = ProcessingBlockNumber;
|
|
821
|
-
exports.ProcessingBatchSize = ProcessingBatchSize;
|
|
822
823
|
exports.ProcessingMaxBatchSize = ProcessingMaxBatchSize;
|
|
823
824
|
exports.ProgressBlockNumber = ProgressBlockNumber;
|
|
824
825
|
exports.ProgressEventsCount = ProgressEventsCount;
|
|
826
|
+
exports.ProgressBatchCount = ProgressBatchCount;
|
|
825
827
|
exports.effectLabelsSchema = effectLabelsSchema;
|
|
826
828
|
exports.EffectCallsCount = EffectCallsCount;
|
|
827
829
|
exports.EffectCacheCount = EffectCacheCount;
|
|
830
|
+
exports.EffectCacheInvalidationsCount = EffectCacheInvalidationsCount;
|
|
828
831
|
exports.StorageLoad = StorageLoad;
|
|
829
832
|
/* loadEntitiesDurationCounter Not a pure module */
|
|
@@ -36,32 +36,31 @@ let make = (
|
|
|
36
36
|
let getSafeCheckpointId = (safeCheckpointTracking: t, ~sourceBlockNumber: int) => {
|
|
37
37
|
let safeBlockNumber = sourceBlockNumber - safeCheckpointTracking.maxReorgDepth
|
|
38
38
|
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
39
|
+
switch safeCheckpointTracking.checkpointIds {
|
|
40
|
+
| [] => 0
|
|
41
|
+
| _
|
|
42
|
+
if safeCheckpointTracking.checkpointBlockNumbers->Belt.Array.getUnsafe(0) > safeBlockNumber => 0
|
|
43
|
+
| [checkpointId] => checkpointId
|
|
44
|
+
| _ => {
|
|
45
|
+
let trackingCheckpointsCount = safeCheckpointTracking.checkpointIds->Array.length
|
|
46
|
+
let result = ref(None)
|
|
47
|
+
let idx = ref(1)
|
|
48
48
|
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
}
|
|
57
|
-
idx := idx.contents + 1
|
|
49
|
+
while idx.contents < trackingCheckpointsCount && result.contents === None {
|
|
50
|
+
if (
|
|
51
|
+
safeCheckpointTracking.checkpointBlockNumbers->Belt.Array.getUnsafe(idx.contents) >
|
|
52
|
+
safeBlockNumber
|
|
53
|
+
) {
|
|
54
|
+
result :=
|
|
55
|
+
Some(safeCheckpointTracking.checkpointIds->Belt.Array.getUnsafe(idx.contents - 1))
|
|
58
56
|
}
|
|
57
|
+
idx := idx.contents + 1
|
|
58
|
+
}
|
|
59
59
|
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
}
|
|
60
|
+
switch result.contents {
|
|
61
|
+
| Some(checkpointId) => checkpointId
|
|
62
|
+
| None =>
|
|
63
|
+
safeCheckpointTracking.checkpointIds->Belt.Array.getUnsafe(trackingCheckpointsCount - 1)
|
|
65
64
|
}
|
|
66
65
|
}
|
|
67
66
|
}
|
|
@@ -21,13 +21,17 @@ function make(maxReorgDepth, shouldRollbackOnReorg, chainReorgCheckpoints) {
|
|
|
21
21
|
|
|
22
22
|
function getSafeCheckpointId(safeCheckpointTracking, sourceBlockNumber) {
|
|
23
23
|
var safeBlockNumber = sourceBlockNumber - safeCheckpointTracking.maxReorgDepth | 0;
|
|
24
|
+
var match = safeCheckpointTracking.checkpointIds;
|
|
25
|
+
if (match.length === 0) {
|
|
26
|
+
return 0;
|
|
27
|
+
}
|
|
24
28
|
if (safeCheckpointTracking.checkpointBlockNumbers[0] > safeBlockNumber) {
|
|
25
29
|
return 0;
|
|
26
30
|
}
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
return safeCheckpointTracking.checkpointIds[0];
|
|
31
|
+
if (match.length === 1) {
|
|
32
|
+
return match[0];
|
|
30
33
|
}
|
|
34
|
+
var trackingCheckpointsCount = safeCheckpointTracking.checkpointIds.length;
|
|
31
35
|
var result;
|
|
32
36
|
var idx = 1;
|
|
33
37
|
while(idx < trackingCheckpointsCount && result === undefined) {
|
package/src/db/EntityHistory.res
CHANGED
|
@@ -43,8 +43,9 @@ type t<'entity> = {
|
|
|
43
43
|
}
|
|
44
44
|
|
|
45
45
|
let maxPgTableNameLength = 63
|
|
46
|
+
let historyTablePrefix = "envio_history_"
|
|
46
47
|
let historyTableName = (~entityName, ~entityIndex) => {
|
|
47
|
-
let fullName =
|
|
48
|
+
let fullName = historyTablePrefix ++ entityName
|
|
48
49
|
if fullName->String.length > maxPgTableNameLength {
|
|
49
50
|
let entityIndexStr = entityIndex->Belt.Int.toString
|
|
50
51
|
fullName->Js.String.slice(~from=0, ~to_=maxPgTableNameLength - entityIndexStr->String.length) ++
|
|
@@ -86,8 +87,6 @@ let fromTable = (table: table, ~schema: S.t<'entity>, ~entityIndex): t<'entity>
|
|
|
86
87
|
~isPrimaryKey=true,
|
|
87
88
|
)
|
|
88
89
|
|
|
89
|
-
// let dataFieldNames = dataFields->Belt.Array.map(field => field->getFieldName)
|
|
90
|
-
|
|
91
90
|
let entityTableName = table.tableName
|
|
92
91
|
let historyTableName = historyTableName(~entityName=entityTableName, ~entityIndex)
|
|
93
92
|
//ignore composite indices
|
|
@@ -39,8 +39,10 @@ function makeSetUpdateSchema(entitySchema) {
|
|
|
39
39
|
});
|
|
40
40
|
}
|
|
41
41
|
|
|
42
|
+
var historyTablePrefix = "envio_history_";
|
|
43
|
+
|
|
42
44
|
function historyTableName(entityName, entityIndex) {
|
|
43
|
-
var fullName =
|
|
45
|
+
var fullName = historyTablePrefix + entityName;
|
|
44
46
|
if (fullName.length <= 63) {
|
|
45
47
|
return fullName;
|
|
46
48
|
}
|
|
@@ -181,6 +183,7 @@ exports.changeFieldName = changeFieldName;
|
|
|
181
183
|
exports.checkpointIdFieldName = checkpointIdFieldName;
|
|
182
184
|
exports.makeSetUpdateSchema = makeSetUpdateSchema;
|
|
183
185
|
exports.maxPgTableNameLength = maxPgTableNameLength;
|
|
186
|
+
exports.historyTablePrefix = historyTablePrefix;
|
|
184
187
|
exports.historyTableName = historyTableName;
|
|
185
188
|
exports.fromTable = fromTable;
|
|
186
189
|
exports.makePruneStaleEntityHistoryQuery = makePruneStaleEntityHistoryQuery;
|