envio 2.27.5-rc.0 → 2.27.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +5 -5
- package/src/ErrorHandling.res +4 -5
- package/src/ErrorHandling.res.js +7 -7
- package/src/Hasura.res +4 -4
- package/src/Hasura.res.js +4 -5
- package/src/Internal.res +0 -7
- package/src/Internal.res.js +0 -11
- package/src/LoadManager.res +1 -1
- package/src/LoadManager.res.js +1 -2
- package/src/Logging.res +1 -1
- package/src/Logging.res.js +2 -2
- package/src/PgStorage.res +3 -5
- package/src/PgStorage.res.js +2 -2
- package/src/Time.res +1 -1
- package/src/Time.res.js +1 -2
- package/src/Utils.res +7 -0
- package/src/Utils.res.js +11 -0
- package/src/db/EntityHistory.res +97 -33
- package/src/db/EntityHistory.res.js +21 -6
- package/src/db/Table.res +1 -0
- package/src/sources/SourceManager.res +3 -3
- package/src/sources/SourceManager.res.js +3 -4
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "envio",
-  "version": "v2.27.5-rc.0",
+  "version": "v2.27.6",
   "description": "A latency and sync speed optimized, developer friendly blockchain data indexer.",
   "bin": "./bin.js",
   "main": "./index.js",
@@ -25,10 +25,10 @@
   },
   "homepage": "https://envio.dev",
   "optionalDependencies": {
-    "envio-linux-x64": "v2.27.5-rc.0",
-    "envio-linux-arm64": "v2.27.5-rc.0",
-    "envio-darwin-x64": "v2.27.5-rc.0",
-    "envio-darwin-arm64": "v2.27.5-rc.0"
+    "envio-linux-x64": "v2.27.6",
+    "envio-linux-arm64": "v2.27.6",
+    "envio-darwin-x64": "v2.27.6",
+    "envio-darwin-arm64": "v2.27.6"
   },
   "dependencies": {
     "@envio-dev/hypersync-client": "0.6.5",
package/src/ErrorHandling.res
CHANGED
@@ -6,18 +6,17 @@ let make = (exn, ~logger=Logging.getLogger(), ~msg=?) => {
 
 let log = (self: t) => {
   switch self {
-  | {exn, msg: Some(msg), logger} =>
-    logger->Logging.childErrorWithExn(exn->Internal.prettifyExn, msg)
-  | {exn, msg: None, logger} => logger->Logging.childError(exn->Internal.prettifyExn)
+  | {exn, msg: Some(msg), logger} => logger->Logging.childErrorWithExn(exn->Utils.prettifyExn, msg)
+  | {exn, msg: None, logger} => logger->Logging.childError(exn->Utils.prettifyExn)
   }
 }
 
 let raiseExn = (self: t) => {
-  self.exn->Internal.prettifyExn->raise
+  self.exn->Utils.prettifyExn->raise
 }
 
 let mkLogAndRaise = (~logger=?, ~msg=?, exn) => {
-  let exn = exn->Internal.prettifyExn
+  let exn = exn->Utils.prettifyExn
   exn->make(~logger?, ~msg?)->log
   exn->raise
 }
package/src/ErrorHandling.res.js
CHANGED
@@ -1,8 +1,8 @@
 // Generated by ReScript, PLEASE EDIT WITH CARE
 'use strict';
 
+var Utils = require("./Utils.res.js");
 var Logging = require("./Logging.res.js");
-var Internal = require("./Internal.res.js");
 
 function make(exn, loggerOpt, msg) {
   var logger = loggerOpt !== undefined ? loggerOpt : Logging.getLogger();
@@ -18,18 +18,18 @@ function log(self) {
   var exn = self.exn;
   var logger = self.logger;
   if (msg !== undefined) {
-    return Logging.childErrorWithExn(logger, Internal.prettifyExn(exn), msg);
+    return Logging.childErrorWithExn(logger, Utils.prettifyExn(exn), msg);
   } else {
-    return Logging.childError(logger, Internal.prettifyExn(exn));
+    return Logging.childError(logger, Utils.prettifyExn(exn));
   }
 }
 
 function raiseExn(self) {
-  throw Internal.prettifyExn(self.exn);
+  throw Utils.prettifyExn(self.exn);
 }
 
 function mkLogAndRaise(logger, msg, exn) {
-  var exn$1 = Internal.prettifyExn(exn);
+  var exn$1 = Utils.prettifyExn(exn);
   log(make(exn$1, logger, msg));
   throw exn$1;
 }
@@ -44,7 +44,7 @@ function unwrapLogAndRaise(logger, msg, result) {
 
 function logAndRaise(self) {
   log(self);
-  throw Internal.prettifyExn(self.exn);
+  throw Utils.prettifyExn(self.exn);
 }
 
 exports.make = make;
@@ -53,4 +53,4 @@ exports.raiseExn = raiseExn;
 exports.mkLogAndRaise = mkLogAndRaise;
 exports.unwrapLogAndRaise = unwrapLogAndRaise;
 exports.logAndRaise = logAndRaise;
-/* Logging Not a pure module */
+/* Utils Not a pure module */
package/src/Hasura.res
CHANGED
@@ -83,7 +83,7 @@ let clearHasuraMetadata = async (~endpoint, ~auth) => {
   | exn =>
     Logging.error({
       "msg": `EE806: There was an issue clearing metadata in hasura - indexing may still work - but you may have issues querying the data in hasura.`,
-      "err": exn->Internal.prettifyExn,
+      "err": exn->Utils.prettifyExn,
     })
   }
 }
@@ -125,7 +125,7 @@ let trackTables = async (~endpoint, ~auth, ~pgSchema, ~tableNames: array<string>
     Logging.error({
       "msg": `EE807: There was an issue tracking tables in hasura - indexing may still work - but you may have issues querying the data in hasura.`,
       "tableNames": tableNames,
-      "err": exn->Internal.prettifyExn,
+      "err": exn->Utils.prettifyExn,
     })
   }
 }
@@ -172,7 +172,7 @@ let createSelectPermissions = async (
     Logging.error({
       "msg": `EE808: There was an issue setting up view permissions for the ${tableName} table in hasura - indexing may still work - but you may have issues querying the data in hasura.`,
       "tableName": tableName,
-      "err": exn->Internal.prettifyExn,
+      "err": exn->Utils.prettifyExn,
     })
   }
 }
@@ -213,7 +213,7 @@ let createEntityRelationship = async (
     Logging.error({
      "msg": `EE808: There was an issue setting up ${relationshipType} relationship for the ${tableName} table in hasura - indexing may still work - but you may have issues querying the data in hasura.`,
       "tableName": tableName,
-      "err": exn->Internal.prettifyExn,
+      "err": exn->Utils.prettifyExn,
     })
   }
 }
package/src/Hasura.res.js
CHANGED
@@ -6,7 +6,6 @@ var Table = require("./db/Table.res.js");
 var Utils = require("./Utils.res.js");
 var Schema = require("./db/Schema.res.js");
 var Logging = require("./Logging.res.js");
-var Internal = require("./Internal.res.js");
 var Belt_Array = require("rescript/lib/js/belt_Array.js");
 var Caml_splice_call = require("rescript/lib/js/caml_splice_call.js");
 var S$RescriptSchema = require("rescript-schema/src/S.res.js");
@@ -102,7 +101,7 @@ async function clearHasuraMetadata(endpoint, auth) {
     var exn = Caml_js_exceptions.internalToOCamlException(raw_exn);
     return Logging.error({
         msg: "EE806: There was an issue clearing metadata in hasura - indexing may still work - but you may have issues querying the data in hasura.",
-        err: Internal.prettifyExn(exn)
+        err: Utils.prettifyExn(exn)
       });
   }
 }
@@ -138,7 +137,7 @@ async function trackTables(endpoint, auth, pgSchema, tableNames) {
     return Logging.error({
         msg: "EE807: There was an issue tracking tables in hasura - indexing may still work - but you may have issues querying the data in hasura.",
         tableNames: tableNames,
-        err: Internal.prettifyExn(exn)
+        err: Utils.prettifyExn(exn)
       });
   }
 }
@@ -174,7 +173,7 @@ async function createSelectPermissions(auth, endpoint, tableName, pgSchema, resp
     return Logging.error({
         msg: "EE808: There was an issue setting up view permissions for the " + tableName + " table in hasura - indexing may still work - but you may have issues querying the data in hasura.",
         tableName: tableName,
-        err: Internal.prettifyExn(exn)
+        err: Utils.prettifyExn(exn)
       });
   }
 }
@@ -199,7 +198,7 @@ async function createEntityRelationship(pgSchema, endpoint, auth, tableName, rel
     return Logging.error({
         msg: "EE808: There was an issue setting up " + relationshipType + " relationship for the " + tableName + " table in hasura - indexing may still work - but you may have issues querying the data in hasura.",
         tableName: tableName,
-        err: Internal.prettifyExn(exn)
+        err: Utils.prettifyExn(exn)
       });
   }
 }
package/src/Internal.res
CHANGED
@@ -238,10 +238,3 @@ let makeCacheTable = (~effectName) => {
 
 @genType.import(("./Types.ts", "Invalid"))
 type noEventFilters
-
-let prettifyExn = exn => {
-  switch exn->Js.Exn.anyToExnInternal {
-  | Js.Exn.Error(e) => e->(Utils.magic: Js.Exn.t => exn)
-  | exn => exn
-  }
-}
package/src/Internal.res.js
CHANGED
@@ -8,7 +8,6 @@ var Address = require("./Address.res.js");
 var Belt_Array = require("rescript/lib/js/belt_Array.js");
 var Caml_option = require("rescript/lib/js/caml_option.js");
 var S$RescriptSchema = require("rescript-schema/src/S.res.js");
-var Caml_js_exceptions = require("rescript/lib/js/caml_js_exceptions.js");
 
 var fuelSupplyParamsSchema = S$RescriptSchema.schema(function (s) {
   return {
@@ -44,19 +43,9 @@ function makeCacheTable(effectName) {
     ]);
 }
 
-function prettifyExn(exn) {
-  var e = Caml_js_exceptions.internalToOCamlException(exn);
-  if (e.RE_EXN_ID === Js_exn.$$Error) {
-    return e._1;
-  } else {
-    return e;
-  }
-}
-
 exports.fuelSupplyParamsSchema = fuelSupplyParamsSchema;
 exports.fuelTransferParamsSchema = fuelTransferParamsSchema;
 exports.makeEnumConfig = makeEnumConfig;
 exports.cacheTablePrefix = cacheTablePrefix;
 exports.makeCacheTable = makeCacheTable;
-exports.prettifyExn = prettifyExn;
 /* fuelSupplyParamsSchema Not a pure module */
package/src/LoadManager.res
CHANGED
@@ -71,7 +71,7 @@ let schedule = async loadManager => {
     await group.load(inputsToLoad)
   } catch {
   | exn => {
-      let exn = exn->Internal.prettifyExn
+      let exn = exn->Utils.prettifyExn
       currentInputKeys->Array.forEach(inputKey => {
         let call = calls->Js.Dict.unsafeGet(inputKey)
         call.reject(exn)
package/src/LoadManager.res.js
CHANGED
@@ -2,7 +2,6 @@
 'use strict';
 
 var Utils = require("./Utils.res.js");
-var Internal = require("./Internal.res.js");
 var Belt_Array = require("rescript/lib/js/belt_Array.js");
 var Caml_js_exceptions = require("rescript/lib/js/caml_js_exceptions.js");
 
@@ -47,7 +46,7 @@ async function schedule(loadManager) {
     }
     catch (raw_exn){
       var exn = Caml_js_exceptions.internalToOCamlException(raw_exn);
-      var exn$1 = Internal.prettifyExn(exn);
+      var exn$1 = Utils.prettifyExn(exn);
       Belt_Array.forEach(currentInputKeys, (function (inputKey) {
         var call = calls[inputKey];
         call.reject(exn$1);
package/src/Logging.res
CHANGED
@@ -183,5 +183,5 @@ let getUserLogger = (eventItem): Envio.logger => {
   warn: (message: string, ~params=?) => eventItem->logForItem(#uwarn, message, ~params?),
   error: (message: string, ~params=?) => eventItem->logForItem(#uerror, message, ~params?),
   errorWithExn: (message: string, exn) =>
-    eventItem->logForItem(#uerror, message, ~params={"err": exn->Internal.prettifyExn}),
+    eventItem->logForItem(#uerror, message, ~params={"err": exn->Utils.prettifyExn}),
 }
package/src/Logging.res.js
CHANGED
@@ -3,10 +3,10 @@
 
 var Pino = require("./bindings/Pino.res.js");
 var Pino$1 = require("pino");
+var Utils = require("./Utils.res.js");
 var Js_exn = require("rescript/lib/js/js_exn.js");
 var Js_dict = require("rescript/lib/js/js_dict.js");
 var Caml_obj = require("rescript/lib/js/caml_obj.js");
-var Internal = require("./Internal.res.js");
 var Caml_option = require("rescript/lib/js/caml_option.js");
 var EcsPinoFormat = require("@elastic/ecs-pino-format");
 
@@ -242,7 +242,7 @@ function getUserLogger(eventItem) {
     }),
   errorWithExn: (function (message, exn) {
       var params = {
-        err: Internal.prettifyExn(exn)
+        err: Utils.prettifyExn(exn)
       };
       getEventLogger(eventItem)["uerror"](params, message);
     })
package/src/PgStorage.res
CHANGED
@@ -325,9 +325,7 @@ let removeInvalidUtf8InPlace = entities =>
     })
   })
 
-let pgErrorMessageSchema = S.object(s =>
-  s.field("message", S.string)
-)
+let pgErrorMessageSchema = S.object(s => s.field("message", S.string))
 
 exception PgEncodingError({table: Table.table})
 
@@ -389,7 +387,7 @@ let setOrThrow = async (sql, ~items, ~table: Table.table, ~itemSchema, ~pgSchema
     raise(
       Persistence.StorageError({
         message: `Failed to insert items into table "${table.tableName}"`,
-        reason: exn->Internal.prettifyExn,
+        reason: exn->Utils.prettifyExn,
       }),
     )
   }
@@ -765,7 +763,7 @@ let make = (
       }
     }
   } catch {
-  | exn => Logging.errorWithExn(exn->Internal.prettifyExn, `Failed to dump cache.`)
+  | exn => Logging.errorWithExn(exn->Utils.prettifyExn, `Failed to dump cache.`)
   }
 }
 
package/src/PgStorage.res.js
CHANGED
@@ -277,7 +277,7 @@ async function setOrThrow(sql, items, table, itemSchema, pgSchema) {
     throw {
       RE_EXN_ID: Persistence.StorageError,
       message: "Failed to insert items into table \"" + table.tableName + "\"",
-      reason: Internal.prettifyExn(exn),
+      reason: Utils.prettifyExn(exn),
       Error: new Error()
     };
 }
@@ -523,7 +523,7 @@ function make(sql, pgHost, pgSchema, pgPort, pgUser, pgDatabase, pgPassword, onI
   }
   catch (raw_exn){
     var exn$1 = Caml_js_exceptions.internalToOCamlException(raw_exn);
-    return Logging.errorWithExn(Internal.prettifyExn(exn$1), "Failed to dump cache.");
+    return Logging.errorWithExn(Utils.prettifyExn(exn$1), "Failed to dump cache.");
   }
 };
 var restoreEffectCache = async function (withUpload) {
package/src/Time.res
CHANGED
@@ -17,7 +17,7 @@ let rec retryAsyncWithExponentialBackOff = async (
     let log = retryCount === 0 ? Logging.childTrace : Logging.childWarn
     logger->log({
       "msg": `Retrying query ${nextRetryCount->Belt.Int.toString}/${maxRetries->Belt.Int.toString} in ${backOffMillis->Belt.Int.toString}ms - waiting for correct result.`,
-      "err": exn->Internal.prettifyExn,
+      "err": exn->Utils.prettifyExn,
     })
     await resolvePromiseAfterDelay(~delayMilliseconds=backOffMillis)
 
package/src/Time.res.js
CHANGED
@@ -3,7 +3,6 @@
 
 var Utils = require("./Utils.res.js");
 var Logging = require("./Logging.res.js");
-var Internal = require("./Internal.res.js");
 var ErrorHandling = require("./ErrorHandling.res.js");
 var Caml_js_exceptions = require("rescript/lib/js/caml_js_exceptions.js");
 
@@ -26,7 +25,7 @@ async function retryAsyncWithExponentialBackOff(backOffMillisOpt, multiplicative
     var log = retryCount === 0 ? Logging.childTrace : Logging.childWarn;
     log(logger, {
       msg: "Retrying query " + String(nextRetryCount) + "/" + String(maxRetries) + " in " + String(backOffMillis) + "ms - waiting for correct result.",
-      err: Internal.prettifyExn(exn)
+      err: Utils.prettifyExn(exn)
     });
     await Utils.delay(backOffMillis);
     return await retryAsyncWithExponentialBackOff(Math.imul(backOffMillis, multiplicative), multiplicative, nextRetryCount, maxRetries, logger, f);
package/src/Utils.res
CHANGED
package/src/Utils.res.js
CHANGED
@@ -10,6 +10,7 @@ var Belt_Option = require("rescript/lib/js/belt_Option.js");
 var Caml_option = require("rescript/lib/js/caml_option.js");
 var Caml_splice_call = require("rescript/lib/js/caml_splice_call.js");
 var S$RescriptSchema = require("rescript-schema/src/S.res.js");
+var Caml_js_exceptions = require("rescript/lib/js/caml_js_exceptions.js");
 
 function delay(milliseconds) {
   return new Promise((function (resolve, param) {
@@ -530,6 +531,15 @@ var Hash = {
   makeOrThrow: makeOrThrow
 };
 
+function prettifyExn(exn) {
+  var e = Caml_js_exceptions.internalToOCamlException(exn);
+  if (e.RE_EXN_ID === Js_exn.$$Error) {
+    return e._1;
+  } else {
+    return e;
+  }
+}
+
 exports.delay = delay;
 exports.$$Object = $$Object;
 exports.$$Error = $$Error;
@@ -548,4 +558,5 @@ exports.$$WeakMap = $$WeakMap;
 exports.$$Map = $$Map;
 exports.$$Proxy = $$Proxy;
 exports.Hash = Hash;
+exports.prettifyExn = prettifyExn;
 /* variantTag Not a pure module */
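
A minimal sketch of the relocated helper in use. The require path and the failing JSON.parse call are illustrative assumptions, not code from the package; it shows why the call sites above now route exceptions through Utils.prettifyExn before logging or re-throwing.

// Hypothetical call site (assumes the published package layout and the
// rescript runtime are installed):
var Utils = require("envio/src/Utils.res.js");

try {
  JSON.parse("{ not json"); // throws a plain SyntaxError
} catch (rawExn) {
  // Caml_js_exceptions.internalToOCamlException wraps a non-ReScript
  // exception as { RE_EXN_ID: Js_exn.$$Error, _1: <original error> };
  // prettifyExn returns _1, so loggers receive the plain Error object
  // instead of the wrapper. ReScript-defined exceptions pass through.
  var err = Utils.prettifyExn(rawExn);
  console.error(err instanceof SyntaxError); // true
}
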
|
package/src/db/EntityHistory.res
CHANGED
|
@@ -251,58 +251,57 @@ let fromTable = (table: table, ~pgSchema, ~schema: S.t<'entity>): t<'entity> =>
|
|
|
251
251
|
|
|
252
252
|
let createInsertFnQuery = {
|
|
253
253
|
`CREATE OR REPLACE FUNCTION ${insertFnName}(${historyRowArg} ${historyTablePath}, should_copy_current_entity BOOLEAN)
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
254
|
+
RETURNS void AS $$
|
|
255
|
+
DECLARE
|
|
256
|
+
v_previous_record RECORD;
|
|
257
|
+
v_origin_record RECORD;
|
|
258
|
+
BEGIN
|
|
259
|
+
-- Check if previous values are not provided
|
|
260
|
+
IF ${previousHistoryFieldsAreNullStr} THEN
|
|
261
|
+
-- Find the most recent record for the same id
|
|
262
|
+
SELECT ${currentChangeFieldNamesCommaSeparated} INTO v_previous_record
|
|
263
|
+
FROM ${historyTablePath}
|
|
264
|
+
WHERE ${id} = ${historyRowArg}.${id}
|
|
265
|
+
ORDER BY ${currentChangeFieldNames
|
|
266
266
|
->Belt.Array.map(fieldName => fieldName ++ " DESC")
|
|
267
267
|
->Js.Array2.joinWith(", ")}
|
|
268
|
-
|
|
268
|
+
LIMIT 1;
|
|
269
269
|
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
270
|
+
-- If a previous record exists, use its values
|
|
271
|
+
IF FOUND THEN
|
|
272
|
+
${Belt.Array.zip(currentChangeFieldNames, previousChangeFieldNames)
|
|
273
273
|
->Belt.Array.map(((currentFieldName, previousFieldName)) => {
|
|
274
274
|
`${historyRowArg}.${previousFieldName} := v_previous_record.${currentFieldName};`
|
|
275
275
|
})
|
|
276
276
|
->Js.Array2.joinWith(" ")}
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
277
|
+
ElSIF should_copy_current_entity THEN
|
|
278
|
+
-- Check if a value for the id exists in the origin table and if so, insert a history row for it.
|
|
279
|
+
SELECT ${dataFieldNamesCommaSeparated} FROM ${originTablePath} WHERE id = ${historyRowArg}.${id} INTO v_origin_record;
|
|
280
|
+
IF FOUND THEN
|
|
281
|
+
INSERT INTO ${historyTablePath} (${currentChangeFieldNamesCommaSeparated}, ${dataFieldNamesCommaSeparated}, "${actionFieldName}")
|
|
282
|
+
-- SET the current change data fields to 0 since we don't know what they were
|
|
283
|
+
-- and it doesn't matter provided they are less than any new values
|
|
284
|
+
VALUES (${currentChangeFieldNames
|
|
285
285
|
->Belt.Array.map(_ => "0")
|
|
286
286
|
->Js.Array2.joinWith(", ")}, ${dataFieldNames
|
|
287
287
|
->Belt.Array.map(fieldName => `v_origin_record."${fieldName}"`)
|
|
288
288
|
->Js.Array2.joinWith(", ")}, 'SET');
|
|
289
289
|
|
|
290
|
-
|
|
290
|
+
${previousChangeFieldNames
|
|
291
291
|
->Belt.Array.map(previousFieldName => {
|
|
292
292
|
`${historyRowArg}.${previousFieldName} := 0;`
|
|
293
293
|
})
|
|
294
294
|
->Js.Array2.joinWith(" ")}
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
295
|
+
END IF;
|
|
296
|
+
END IF;
|
|
297
|
+
END IF;
|
|
298
298
|
|
|
299
|
-
|
|
300
|
-
|
|
299
|
+
INSERT INTO ${historyTablePath} (${allFieldNamesDoubleQuoted->Js.Array2.joinWith(", ")})
|
|
300
|
+
VALUES (${allFieldNamesDoubleQuoted
|
|
301
301
|
->Belt.Array.map(fieldName => `${historyRowArg}.${fieldName}`)
|
|
302
302
|
->Js.Array2.joinWith(", ")});
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
`
|
|
303
|
+
END;
|
|
304
|
+
$$ LANGUAGE plpgsql;`
|
|
306
305
|
}
|
|
307
306
|
|
|
308
307
|
let insertFnString = `(sql, rowArgs, shouldCopyCurrentEntity) =>
|
|
@@ -318,3 +317,68 @@ let fromTable = (table: table, ~pgSchema, ~schema: S.t<'entity>): t<'entity> =>
|
|
|
318
317
|
|
|
319
318
|
{table, createInsertFnQuery, schema, schemaRows: S.array(schema), insertFn}
|
|
320
319
|
}
|
|
320
|
+
|
|
321
|
+
type safeReorgBlocks = {
|
|
322
|
+
chainIds: array<int>,
|
|
323
|
+
blockNumbers: array<int>,
|
|
324
|
+
}
|
|
325
|
+
|
|
326
|
+
// We want to keep only the minimum history needed to survive chain reorgs and delete everything older.
|
|
327
|
+
// Each chain gives us a "safe block": we assume reorgs will never happen at that block.
|
|
328
|
+
//
|
|
329
|
+
// What we keep per entity id:
|
|
330
|
+
// - The latest history row at or before the safe block (the "anchor"). This is the last state that could
|
|
331
|
+
// ever be relevant during a rollback.
|
|
332
|
+
// - If there are history rows in reorg threshold (after the safe block), we keep the anchor and delete all older rows.
|
|
333
|
+
// - If there are no history rows in reorg threshold (after the safe block), even the anchor is redundant, so we delete it too.
|
|
334
|
+
//
|
|
335
|
+
// Why this is safe:
|
|
336
|
+
// - Rollbacks will not cross the safe block, so rows older than the anchor can never be referenced again.
|
|
337
|
+
// - If nothing changed in reorg threshold (after the safe block), the current state for that id can be reconstructed from the
|
|
338
|
+
// origin table; we do not need a pre-safe anchor for it.
|
|
339
|
+
//
|
|
340
|
+
// Performance notes:
|
|
341
|
+
// - Multi-chain batching: inputs are expanded with unnest, letting one prepared statement prune many chains and
|
|
342
|
+
// enabling the planner to use indexes per chain_id efficiently.
|
|
343
|
+
// - Minimal row touches: we only compute keep_serial per id and delete strictly older rows; this reduces write
|
|
344
|
+
// amplification and vacuum pressure compared to broad time-based purges.
|
|
345
|
+
// - Contention-awareness: the DELETE joins on ids first, narrowing target rows early to limit locking and buffer churn.
|
|
346
|
+
let makePruneStaleEntityHistoryQuery = (~entityName, ~pgSchema) => {
|
|
347
|
+
let historyTableName = entityName ++ "_history"
|
|
348
|
+
let historyTableRef = `"${pgSchema}"."${historyTableName}"`
|
|
349
|
+
|
|
350
|
+
`WITH safe AS (
|
|
351
|
+
SELECT s.chain_id, s.block_number
|
|
352
|
+
FROM unnest($1::int[], $2::bigint[]) AS s(chain_id, block_number)
|
|
353
|
+
),
|
|
354
|
+
max_before_safe AS (
|
|
355
|
+
SELECT t.id, MAX(t.serial) AS keep_serial
|
|
356
|
+
FROM ${historyTableRef} t
|
|
357
|
+
JOIN safe s
|
|
358
|
+
ON s.chain_id = t.entity_history_chain_id
|
|
359
|
+
AND t.entity_history_block_number <= s.block_number
|
|
360
|
+
GROUP BY t.id
|
|
361
|
+
),
|
|
362
|
+
post_safe AS (
|
|
363
|
+
SELECT DISTINCT t.id
|
|
364
|
+
FROM ${historyTableRef} t
|
|
365
|
+
JOIN safe s
|
|
366
|
+
ON s.chain_id = t.entity_history_chain_id
|
|
367
|
+
AND t.entity_history_block_number > s.block_number
|
|
368
|
+
)
|
|
369
|
+
DELETE FROM ${historyTableRef} d
|
|
370
|
+
USING max_before_safe m
|
|
371
|
+
LEFT JOIN post_safe p ON p.id = m.id
|
|
372
|
+
WHERE d.id = m.id
|
|
373
|
+
AND (
|
|
374
|
+
d.serial < m.keep_serial
|
|
375
|
+
OR (p.id IS NULL AND d.serial = m.keep_serial)
|
|
376
|
+
);`
|
|
377
|
+
}
|
|
378
|
+
|
|
379
|
+
let pruneStaleEntityHistory = (sql, ~entityName, ~pgSchema, ~safeReorgBlocks): promise<unit> => {
|
|
380
|
+
sql->Postgres.preparedUnsafe(
|
|
381
|
+
makePruneStaleEntityHistoryQuery(~entityName, ~pgSchema),
|
|
382
|
+
(safeReorgBlocks.chainIds, safeReorgBlocks.blockNumbers)->Utils.magic,
|
|
383
|
+
)
|
|
384
|
+
}
|
|
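
Before the generated JavaScript below, a short usage sketch of the new pruning API. The postgres.js handle, the "Swap" entity name, the "public" schema, and the block values are made-up inputs for illustration, not code from the package.

// Hypothetical caller: one prepared statement prunes history for two chains.
// chainIds[i] pairs with blockNumbers[i] through unnest; per entity id the
// query keeps the newest row at or before that chain's safe block, and drops
// even that anchor when no rows exist after the safe block.
var EntityHistory = require("envio/src/db/EntityHistory.res.js");

async function pruneExample(sql) {
  // The SQL text itself can be inspected per entity/schema:
  console.log(EntityHistory.makePruneStaleEntityHistoryQuery("Swap", "public"));

  await EntityHistory.pruneStaleEntityHistory(sql, "Swap", "public", {
    chainIds: [1, 10],
    blockNumbers: [21000000, 126000000],
  });
}
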
package/src/db/EntityHistory.res.js
CHANGED
@@ -252,19 +252,19 @@ function fromTable(table, pgSchema, schema) {
       ]), (function (fieldName) {
         return "\"" + fieldName + "\"";
       }));
-  var createInsertFnQuery = "CREATE OR REPLACE FUNCTION " + insertFnName + "(" + historyRowArg + " " + historyTablePath + ", should_copy_current_entity BOOLEAN)\
+  var createInsertFnQuery = "CREATE OR REPLACE FUNCTION " + insertFnName + "(" + historyRowArg + " " + historyTablePath + ", should_copy_current_entity BOOLEAN)\nRETURNS void AS $$\nDECLARE\n  v_previous_record RECORD;\n  v_origin_record RECORD;\nBEGIN\n  -- Check if previous values are not provided\n  IF " + previousHistoryFieldsAreNullStr + " THEN\n    -- Find the most recent record for the same id\n    SELECT " + currentChangeFieldNamesCommaSeparated + " INTO v_previous_record\n    FROM " + historyTablePath + "\n    WHERE " + id + " = " + historyRowArg + "." + id + "\n    ORDER BY " + Belt_Array.map(currentChangeFieldNames, (function (fieldName) {
         return fieldName + " DESC";
-      })).join(", ") + "\n
+      })).join(", ") + "\n    LIMIT 1;\n\n    -- If a previous record exists, use its values\n    IF FOUND THEN\n      " + Belt_Array.map(Belt_Array.zip(currentChangeFieldNames, previousChangeFieldNames), (function (param) {
         return historyRowArg + "." + param[1] + " := v_previous_record." + param[0] + ";";
-      })).join(" ") + "\n
+      })).join(" ") + "\n    ElSIF should_copy_current_entity THEN\n      -- Check if a value for the id exists in the origin table and if so, insert a history row for it.\n      SELECT " + dataFieldNamesCommaSeparated + " FROM " + originTablePath + " WHERE id = " + historyRowArg + "." + id + " INTO v_origin_record;\n      IF FOUND THEN\n        INSERT INTO " + historyTablePath + " (" + currentChangeFieldNamesCommaSeparated + ", " + dataFieldNamesCommaSeparated + ", \"" + actionFieldName + "\")\n        -- SET the current change data fields to 0 since we don't know what they were\n        -- and it doesn't matter provided they are less than any new values\n        VALUES (" + Belt_Array.map(currentChangeFieldNames, (function (param) {
         return "0";
       })).join(", ") + ", " + Belt_Array.map(dataFieldNames, (function (fieldName) {
         return "v_origin_record.\"" + fieldName + "\"";
-      })).join(", ") + ", 'SET');\n\n
+      })).join(", ") + ", 'SET');\n\n        " + Belt_Array.map(previousChangeFieldNames, (function (previousFieldName) {
         return historyRowArg + "." + previousFieldName + " := 0;";
-      })).join(" ") + "\n
+      })).join(" ") + "\n      END IF;\n    END IF;\n  END IF;\n\n  INSERT INTO " + historyTablePath + " (" + allFieldNamesDoubleQuoted.join(", ") + ")\n  VALUES (" + Belt_Array.map(allFieldNamesDoubleQuoted, (function (fieldName) {
         return historyRowArg + "." + fieldName;
-      })).join(", ") + ");\
+      })).join(", ") + ");\nEND;\n$$ LANGUAGE plpgsql;";
   var insertFnString = "(sql, rowArgs, shouldCopyCurrentEntity) =>\n  sql`select " + insertFnName + "(ROW(" + Belt_Array.map(allFieldNamesDoubleQuoted, (function (fieldNameDoubleQuoted) {
         return "\${rowArgs[" + fieldNameDoubleQuoted + "]\}";
       })).join(", ") + ", NULL), --NULL argument for SERIAL field\n  \${shouldCopyCurrentEntity});`";
@@ -279,10 +279,25 @@ function fromTable(table, pgSchema, schema) {
   };
 }
 
+function makePruneStaleEntityHistoryQuery(entityName, pgSchema) {
+  var historyTableName = entityName + "_history";
+  var historyTableRef = "\"" + pgSchema + "\".\"" + historyTableName + "\"";
+  return "WITH safe AS (\n  SELECT s.chain_id, s.block_number\n  FROM unnest($1::int[], $2::bigint[]) AS s(chain_id, block_number)\n),\nmax_before_safe AS (\n  SELECT t.id, MAX(t.serial) AS keep_serial\n  FROM " + historyTableRef + " t\n  JOIN safe s\n    ON s.chain_id = t.entity_history_chain_id\n    AND t.entity_history_block_number <= s.block_number\n  GROUP BY t.id\n),\npost_safe AS (\n  SELECT DISTINCT t.id\n  FROM " + historyTableRef + " t\n  JOIN safe s\n    ON s.chain_id = t.entity_history_chain_id\n    AND t.entity_history_block_number > s.block_number\n)\nDELETE FROM " + historyTableRef + " d\nUSING max_before_safe m\nLEFT JOIN post_safe p ON p.id = m.id\nWHERE d.id = m.id\n  AND (\n    d.serial < m.keep_serial\n    OR (p.id IS NULL AND d.serial = m.keep_serial)\n  );";
+}
+
+function pruneStaleEntityHistory(sql, entityName, pgSchema, safeReorgBlocks) {
+  return sql.unsafe(makePruneStaleEntityHistoryQuery(entityName, pgSchema), [
              safeReorgBlocks.chainIds,
              safeReorgBlocks.blockNumbers
            ], {prepare: true});
+}
+
 exports.RowAction = RowAction;
 exports.entityIdOnlySchema = entityIdOnlySchema;
 exports.previousHistoryFieldsSchema = previousHistoryFieldsSchema;
 exports.currentHistoryFieldsSchema = currentHistoryFieldsSchema;
 exports.makeHistoryRowSchema = makeHistoryRowSchema;
 exports.fromTable = fromTable;
+exports.makePruneStaleEntityHistoryQuery = makePruneStaleEntityHistoryQuery;
+exports.pruneStaleEntityHistory = pruneStaleEntityHistory;
 /* schema Not a pure module */
package/src/db/Table.res
CHANGED
@@ -240,6 +240,7 @@ let toSqlParams = (table: table, ~schema, ~pgSchema) => {
       switch field {
       | Field(f) =>
         switch f.fieldType {
+        // The case for `BigDecimal! @config(precision: 10, scale: 8)`
        | Custom(fieldType) if fieldType->Js.String2.startsWith("NUMERIC(") => fieldType
        | Custom(fieldType) => `${(Text :> string)}[]::"${pgSchema}".${(fieldType :> string)}`
        | Boolean => `${(Integer :> string)}[]::${(f.fieldType :> string)}`
package/src/sources/SourceManager.res
CHANGED
@@ -198,7 +198,7 @@ let getSourceNewHeight = async (
       logger->Logging.childTrace({
         "msg": `Height retrieval from ${source.name} source failed. Retrying in ${retryInterval->Int.toString}ms.`,
         "source": source.name,
-        "err": exn->Internal.prettifyExn,
+        "err": exn->Utils.prettifyExn,
       })
       retry := retry.contents + 1
       await Utils.delay(retryInterval)
@@ -410,7 +410,7 @@ let executeQuery = async (sourceManager: t, ~query: FetchState.query, ~currentBl
     | FailedParsingItems({exn, message, blockNumber, logIndex}) =>
       logger->Logging.childError({
         "msg": message,
-        "err": exn->Internal.prettifyExn,
+        "err": exn->Utils.prettifyExn,
         "blockNumber": blockNumber,
         "logIndex": logIndex,
       })
@@ -469,7 +469,7 @@ let executeQuery = async (sourceManager: t, ~query: FetchState.query, ~currentBl
         "toBlock": attemptedToBlock,
         "backOffMilliseconds": backoffMillis,
         "retry": retry,
-        "err": exn->Internal.prettifyExn,
+        "err": exn->Utils.prettifyExn,
       })
 
       let shouldSwitch = nextSource !== source
package/src/sources/SourceManager.res.js
CHANGED
@@ -7,7 +7,6 @@ var Hrtime = require("../bindings/Hrtime.res.js");
 var Js_exn = require("rescript/lib/js/js_exn.js");
 var Source = require("./Source.res.js");
 var Logging = require("../Logging.res.js");
-var Internal = require("../Internal.res.js");
 var Belt_Array = require("rescript/lib/js/belt_Array.js");
 var FetchState = require("../FetchState.res.js");
 var Prometheus = require("../Prometheus.res.js");
@@ -142,7 +141,7 @@ async function getSourceNewHeight(sourceManager, source, currentBlockHeight, sta
     Logging.childTrace(logger, {
       msg: "Height retrieval from " + source.name + " source failed. Retrying in " + String(retryInterval) + "ms.",
       source: source.name,
-      err: Internal.prettifyExn(exn)
+      err: Utils.prettifyExn(exn)
     });
     retry = retry + 1 | 0;
     await Utils.delay(retryInterval);
@@ -298,7 +297,7 @@ async function executeQuery(sourceManager, query, currentBlockHeight) {
       toBlock: attemptedToBlock,
       backOffMilliseconds: backoffMillis,
      retry: retry,
-      err: Internal.prettifyExn(error$1.exn)
+      err: Utils.prettifyExn(error$1.exn)
     });
     var shouldSwitch = nextSource !== source;
     if (shouldSwitch) {
@@ -332,7 +331,7 @@ async function executeQuery(sourceManager, query, currentBlockHeight) {
     if (exit === 1) {
       Logging.childError(logger, {
         msg: error$1.message,
-        err: Internal.prettifyExn(error$1.exn),
+        err: Utils.prettifyExn(error$1.exn),
         blockNumber: error$1.blockNumber,
         logIndex: error$1.logIndex
       });