envio 2.12.2 → 2.12.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +5 -5
- package/src/ReorgDetection.res +189 -369
- package/src/db/EntityHistory.res +4 -2
- package/src/db/Table.res +6 -10
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "envio",
-  "version": "v2.12.2",
+  "version": "v2.12.4",
   "description": "A latency and sync speed optimized, developer friendly blockchain data indexer.",
   "bin": "./bin.js",
   "repository": {
@@ -23,10 +23,10 @@
   },
   "homepage": "https://envio.dev",
   "optionalDependencies": {
-    "envio-linux-x64": "v2.12.2",
-    "envio-linux-arm64": "v2.12.2",
-    "envio-darwin-x64": "v2.12.2",
-    "envio-darwin-arm64": "v2.12.2"
+    "envio-linux-x64": "v2.12.4",
+    "envio-linux-arm64": "v2.12.4",
+    "envio-darwin-x64": "v2.12.4",
+    "envio-darwin-arm64": "v2.12.4"
   },
   "dependencies": {
     "@envio-dev/hypersync-client": "0.6.3",
package/src/ReorgDetection.res
CHANGED
@@ -1,19 +1,39 @@
-[…old lines 1–2 not captured in this rendering…]
+open Belt
+
+type blockDataWithTimestamp = {
   blockHash: string,
   blockNumber: int,
+  blockTimestamp: int,
 }
 
 type blockData = {
-[…old lines 8–10 not captured…]
-  blockTimestamp: int,
+  // Block hash is used for actual comparison to test for reorg
+  blockHash: string,
+  blockNumber: int,
 }
 
+external generalizeBlockDataWithTimestamp: blockDataWithTimestamp => blockData = "%identity"
+
 type reorgGuard = {
   lastBlockScannedData: blockData,
-  firstBlockParentNumberAndHash: option<[…]
+  firstBlockParentNumberAndHash: option<blockData>,
+}
+
+type reorgDetected = {
+  scannedBlock: blockData,
+  receivedBlock: blockData,
+}
+
+let reorgDetectedToLogParams = (reorgDetected: reorgDetected, ~shouldRollbackOnReorg) => {
+  let {scannedBlock, receivedBlock} = reorgDetected
+  {
+    "msg": `Blockchain reorg detected. ${shouldRollbackOnReorg
+        ? "Initiating indexer rollback"
+        : "NOT initiating indexer rollback due to configuration"}.`,
+    "blockNumber": scannedBlock.blockNumber,
+    "indexedBlockHash": scannedBlock.blockHash,
+    "receivedBlockHash": receivedBlock.blockHash,
+  }
 }
 
 module LastBlockScannedHashes: {
@@ -24,63 +44,28 @@ module LastBlockScannedHashes: {
   /**Instantiat empty t with no block data*/
   let empty: (~confirmedBlockThreshold: int) => t
 
-  /**
-[…old lines 28–29 not captured…]
-  /**Read the latest last block scanned data at the from the front of the queue*/
-  let getLatestLastBlockData: t => option<blockData>
-  /** Given the head block number, find the earliest timestamp from the data where the data
-  is still within the given block threshold from the head
-  */
-  let getEarlistTimestampInThreshold: (~currentHeight: int, t) => option<int>
-[…not captured…]
-  /**
-  Prunes the back of the unneeded data on the queue.
-[…not captured…]
-  In the case of a multichain indexer, pass in the earliest needed timestamp that
-  occurs within the chains threshold. Ensure that we keep track of one range before that
-  as this is that could be the target range block for a reorg
+  /** Registers a new reorg guard, prunes unnened data and returns the updated data
+  or an error if a reorg has occured
   */
-  let […]
-    ~currentHeight: int,
-    ~earliestMultiChainTimestampInThreshold: int=?,
+  let registerReorgGuard: (
     t,
-[…not captured…]
+    ~reorgGuard: reorgGuard,
+    ~currentBlockHeight: int,
+  ) => result<t, reorgDetected>
 
   /**
-[…old lines 51–52 not captured…]
+  Returns the latest block data which matches block number and hashes in the provided array
+  If it doesn't exist in the reorg threshold it returns None or the latest scanned block outside of the reorg threshold
   */
-  let […]
-[…old lines 55–58 not captured…]
-  need to be zipped
-  */
-  type currentHeightAndLastBlockHashes = {
-    currentHeight: int,
-    lastBlockScannedHashes: t,
-  }
-[…not captured…]
-  /**
-  Finds the earliest timestamp that is withtin the confirmedBlockThreshold of
-  each chain in a multi chain indexer. Returns None if its a single chain or if
-  the list is empty
-  */
-  let getEarliestMultiChainTimestampInThreshold: array<currentHeightAndLastBlockHashes> => option<
-    int,
-  >
+  let getLatestValidScannedBlock: (
+    t,
+    ~blockNumbersAndHashes: array<blockDataWithTimestamp>,
+    ~currentBlockHeight: int,
+  ) => option<blockDataWithTimestamp>
 
   let getThresholdBlockNumbers: (t, ~currentBlockHeight: int) => array<int>
 
-  let […]
-[…not captured…]
-  /**
-  Return a BlockNumbersAndHashes.t rolled back to where blockData is less
-  than the provided blockNumber
-  */
-  let rollBackToBlockNumberLt: (~blockNumber: int, t) => t
+  let rollbackToValidBlockNumber: (t, ~blockNumber: int) => t
 } = {
   type t = {
     // Number of blocks behind head, we want to keep track
@@ -88,359 +73,194 @@ module LastBlockScannedHashes: {
     // it means we are accounting for reorgs up to 200 blocks
     // behind the head
     confirmedBlockThreshold: int,
-    // A […]
-    // for reorgs.
-[…not captured…]
-    lastBlockScannedDataList: list<blockData>,
+    // A hash map of recent blockdata by block number to make comparison checks
+    // for reorgs.
+    dataByBlockNumber: dict<blockData>,
   }
 
-[…old lines 97–98 not captured…]
-    confirmedBlockThreshold,
-    lastBlockScannedDataList,
-  }
+  let makeWithData = (blocks, ~confirmedBlockThreshold) => {
+    let dataByBlockNumber = Js.Dict.empty()
 
-[…old lines 103–105 not captured…]
-    ->Belt.List.reverse
-    ->makeWithDataInternal(~confirmedBlockThreshold)
-  //Instantiates empty LastBlockHashes
-  let empty = (~confirmedBlockThreshold) => makeWithDataInternal(list{}, ~confirmedBlockThreshold)
+    blocks->Belt.Array.forEach(block => {
+      dataByBlockNumber->Js.Dict.set(block.blockNumber->Js.Int.toString, block)
+    })
 
-[…old lines 111–113 not captured…]
-  let rec getEarlistTimestampInThresholdInternal = (
-    // The current block number at the head of the chain
-    ~currentHeight,
-    ~confirmedBlockThreshold,
-    //reversed so that head to tail is earlist to latest
-    reversedLastBlockDataList: list<blockData>,
-  ): option<int> => {
-    switch reversedLastBlockDataList {
-    | list{lastBlockScannedData, ...tail} =>
-      // If the blocknumber is not in the threshold recurse with given blockdata's
-      // timestamp , incrementing the from index
-      if lastBlockScannedData.blockNumber >= currentHeight - confirmedBlockThreshold {
-        // If it's in the threshold return the last earliest timestamp
-        Some(lastBlockScannedData.blockTimestamp)
-      } else {
-        tail->getEarlistTimestampInThresholdInternal(~currentHeight, ~confirmedBlockThreshold)
-      }
-    | list{} => None
+    {
+      confirmedBlockThreshold,
+      dataByBlockNumber,
     }
   }
+  //Instantiates empty LastBlockHashes
+  let empty = (~confirmedBlockThreshold) => {
+    confirmedBlockThreshold,
+    dataByBlockNumber: Js.Dict.empty(),
+  }
 
-  let […]
-[…old lines 136–137 not captured…]
-  ) =>
-    lastBlockScannedDataList
-    ->Belt.List.reverse
-    ->getEarlistTimestampInThresholdInternal(~currentHeight, ~confirmedBlockThreshold)
-[…not captured…]
-  /**
-  Inserts last scanned blockData in its positional order of blockNumber. Adds would usually
-  be always appending to the head with a new last scanned blockData but could be earlier in the
-  case of a dynamic contract.
-  */
-  let rec addLatestLastBlockDataInternal = (
-    ~lastBlockScannedData,
-    //Default empty, accumRev would be each item part of lastBlockScannedDataList that has
-    //a higher blockNumber than lastBlockScannedData
-    ~accumRev=list{},
-    lastBlockScannedDataList,
+  let getDataByBlockNumberCopyInThreshold = (
+    {dataByBlockNumber, confirmedBlockThreshold}: t,
+    ~currentBlockHeight,
   ) => {
-[…old lines 155–163 not captured…]
+    // Js engine automatically orders numeric object keys
+    let ascBlockNumberKeys = dataByBlockNumber->Js.Dict.keys
+    let thresholdBlockNumber = currentBlockHeight - confirmedBlockThreshold
+
+    let copy = Js.Dict.empty()
+
+    for idx in 0 to ascBlockNumberKeys->Array.length - 1 {
+      let blockNumberKey = ascBlockNumberKeys->Js.Array2.unsafe_get(idx)
+      let scannedBlock = dataByBlockNumber->Js.Dict.unsafeGet(blockNumberKey)
+      let isInReorgThreshold = scannedBlock.blockNumber >= thresholdBlockNumber
+      if isInReorgThreshold {
+        copy->Js.Dict.set(blockNumberKey, scannedBlock)
       }
-    | list{} => Belt.List.reverseConcat(accumRev, list{lastBlockScannedData})
     }
-  }
-[…not captured…]
-  // Adds the latest blockData to the head of the list
-  let addLatestLastBlockData = (
-    {confirmedBlockThreshold, lastBlockScannedDataList}: t,
-    ~lastBlockScannedData,
-  ) =>
-    lastBlockScannedDataList
-    ->addLatestLastBlockDataInternal(~lastBlockScannedData)
-    ->makeWithDataInternal(~confirmedBlockThreshold)
-[…not captured…]
-  let getLatestLastBlockData = (self: t) => self.lastBlockScannedDataList->Belt.List.head
-[…not captured…]
-  let blockDataIsPastThreshold = (
-    lastBlockScannedData: blockData,
-    ~currentHeight: int,
-    ~confirmedBlockThreshold: int,
-  ) => lastBlockScannedData.blockNumber < currentHeight - confirmedBlockThreshold
 
-[…old lines 186–187 not captured…]
-  /**
-  Trampolines are a method of handling mutual recursions without the risk of hitting stack limits
-[…not captured…]
-  Tail Call Optimization is not possible on mutually recursive functions and so this is a manual optizimation
-[…not captured…]
-  (note: this implementation of "trampoline" uses a tail call and so TCO tranfsorms it to a while loop in JS)
-  */
-  let rec trampoline = value =>
-    switch value {
-    | Data(v) => v
-    | Callback(fn) => fn()->trampoline
-    }
+    copy
+  }
 
-[…old lines 201–202 not captured…]
-    ~[…]
-    ~[…]
-    ~confirmedBlockThreshold,
-    lastBlockScannedDataListReversed: list<blockData>,
+  let registerReorgGuard = (
+    {confirmedBlockThreshold} as self: t,
+    ~reorgGuard: reorgGuard,
+    ~currentBlockHeight,
   ) => {
-[…old lines 208–219 not captured…]
-    )
-[…not captured…]
-    switch […]
-[…old lines 223–236 not captured…]
-    } else {
-      Data(lastBlockScannedDataListReversed)
+    let dataByBlockNumberCopyInThreshold =
+      self->getDataByBlockNumberCopyInThreshold(~currentBlockHeight)
+
+    let {lastBlockScannedData, firstBlockParentNumberAndHash} = reorgGuard
+
+    let maybeReorgDetected = switch dataByBlockNumberCopyInThreshold->Utils.Dict.dangerouslyGetNonOption(
+      lastBlockScannedData.blockNumber->Int.toString,
+    ) {
+    | Some(scannedBlock) if scannedBlock.blockHash !== lastBlockScannedData.blockHash =>
+      Some({
+        receivedBlock: lastBlockScannedData,
+        scannedBlock,
+      })
+    | _ =>
+      switch firstBlockParentNumberAndHash {
+      //If parentHash is None, then it's the genesis block (no reorg)
+      //Need to check that parentHash matches because of the dynamic contracts
+      | None => None
+      | Some(firstBlockParentNumberAndHash) =>
+        switch dataByBlockNumberCopyInThreshold->Utils.Dict.dangerouslyGetNonOption(
+          firstBlockParentNumberAndHash.blockNumber->Int.toString,
+        ) {
+        | Some(scannedBlock)
+          if scannedBlock.blockHash !== firstBlockParentNumberAndHash.blockHash =>
+          Some({
+            receivedBlock: firstBlockParentNumberAndHash,
+            scannedBlock,
+          })
+        | _ => None
         }
-    | list{_} | list{} => Data(lastBlockScannedDataListReversed)
       }
     }
-[…old lines 243–249 not captured…]
-    switch lastBlockScannedDataListReversed {
-    | list{earliestLastBlockData, ...tail} =>
-      // In the case that back is past the threshold, remove it and
-      // recurse
-      if earliestLastBlockData->blockDataIsPastThreshold(~currentHeight, ~confirmedBlockThreshold) {
-        // Recurse to check the next item
-        Callback(
-          () =>
-            tail->pruneStaleBlockDataInternal(
-              ~currentHeight,
-              ~earliestMultiChainTimestampInThreshold,
-              ~confirmedBlockThreshold,
-            ),
+
+    switch maybeReorgDetected {
+    | Some(reorgDetected) => Error(reorgDetected)
+    | None => {
+        dataByBlockNumberCopyInThreshold->Js.Dict.set(
+          lastBlockScannedData.blockNumber->Int.toString,
+          lastBlockScannedData,
         )
-[…old lines 264–265 not captured…]
+        switch firstBlockParentNumberAndHash {
+        | None => ()
+        | Some(firstBlockParentNumberAndHash) =>
+          dataByBlockNumberCopyInThreshold->Js.Dict.set(
+            firstBlockParentNumberAndHash.blockNumber->Int.toString,
+            firstBlockParentNumberAndHash,
+          )
+        }
+
+        Ok({
+          confirmedBlockThreshold,
+          dataByBlockNumber: dataByBlockNumberCopyInThreshold,
+        })
       }
-    | list{} => Data(list{})
     }
   }
 
-[…old lines 271–272 not captured…]
-    ~[…]
-    ~[…]
-    {confirmedBlockThreshold, lastBlockScannedDataList}: t,
+  let getLatestValidScannedBlock = (
+    self: t,
+    ~blockNumbersAndHashes: array<blockDataWithTimestamp>,
+    ~currentBlockHeight,
   ) => {
-[…old lines 277–278 not captured…]
-    ->[…]
-[…not captured…]
-      ~confirmedBlockThreshold,
-      ~currentHeight,
-      ~earliestMultiChainTimestampInThreshold,
-    ),
-    )
-    ->Belt.List.reverse
-    ->makeWithDataInternal(~confirmedBlockThreshold)
-  }
-[…not captured…]
-  type blockNumberToHashMap = Belt.Map.Int.t<string>
-  exception BlockNotIncludedInMap(int)
+    let verifiedDataByBlockNumber = Js.Dict.empty()
+    blockNumbersAndHashes->Array.forEach(blockData => {
+      verifiedDataByBlockNumber->Js.Dict.set(blockData.blockNumber->Int.toString, blockData)
+    })
 
-[…old lines 293–294 not captured…]
-    let […]
+    let dataByBlockNumber = self->getDataByBlockNumberCopyInThreshold(~currentBlockHeight)
+    // Js engine automatically orders numeric object keys
+    let ascBlockNumberKeys = dataByBlockNumber->Js.Dict.keys
 
-[…old lines 297–301 not captured…]
+    let getPrevScannedBlock = idx =>
+      ascBlockNumberKeys
+      ->Belt.Array.get(idx - 1)
+      ->Option.flatMap(key => {
+        // We should already validate that the block number is verified at the point
+        verifiedDataByBlockNumber->Utils.Dict.dangerouslyGetNonOption(key)
+      })
 
-[…old lines 303–314 not captured…]
-      } else {
-        tail->rollBackToValidHashInternal(~latestBlockHashes)
+    let rec loop = idx => {
+      switch ascBlockNumberKeys->Belt.Array.get(idx) {
+      | Some(blockNumberKey) =>
+        let scannedBlock = dataByBlockNumber->Js.Dict.unsafeGet(blockNumberKey)
+        switch verifiedDataByBlockNumber->Utils.Dict.dangerouslyGetNonOption(blockNumberKey) {
+        | None =>
+          Js.Exn.raiseError(
+            `Unexpected case. Couldn't find verified hash for block number ${blockNumberKey}`,
+          )
+        | Some(verifiedBlockData) if verifiedBlockData.blockHash === scannedBlock.blockHash =>
+          loop(idx + 1)
+        | Some(_) => getPrevScannedBlock(idx)
         }
-[…not captured…]
+      | None => getPrevScannedBlock(idx)
+      }
     }
+    loop(0)
   }
 
   /**
-  Return a BlockNumbersAndHashes.t rolled back to where […]
-[…not captured…]
+  Return a BlockNumbersAndHashes.t rolled back to where blockData is less
+  than the provided blockNumber
   */
-  let […]
-[…not captured…]
-    let latestBlockHashes =
-      blockNumbersAndHashes
-      ->Belt.Array.map(({blockNumber, blockHash}) => (blockNumber, blockHash))
-      ->Belt.Map.Int.fromArray
-[…not captured…]
-    lastBlockScannedDataList
-    ->rollBackToValidHashInternal(~latestBlockHashes)
-    ->Belt.Result.map(list => list->makeWithDataInternal(~confirmedBlockThreshold))
-  }
-[…not captured…]
-  let min = (arrInt: array<int>) => {
-    arrInt->Belt.Array.reduce(None, (current, val) => {
-      switch current {
-      | None => Some(val)
-      | Some(current) => Js.Math.min_int(current, val)->Some
-      }
-    })
-  }
-[…not captured…]
-  let rec rollBackToBlockNumberLtInternal = (
+  let rollbackToValidBlockNumber = (
+    {dataByBlockNumber, confirmedBlockThreshold}: t,
     ~blockNumber: int,
-    latestBlockScannedData: list<blockData>,
   ) => {
-[…old lines 351–357 not captured…]
+    // Js engine automatically orders numeric object keys
+    let ascBlockNumberKeys = dataByBlockNumber->Js.Dict.keys
+
+    let newDataByBlockNumber = Js.Dict.empty()
+
+    let rec loop = idx => {
+      switch ascBlockNumberKeys->Belt.Array.get(idx) {
+      | Some(blockNumberKey) => {
+          let scannedBlock = dataByBlockNumber->Js.Dict.unsafeGet(blockNumberKey)
+          let shouldKeep = scannedBlock.blockNumber <= blockNumber
+          if shouldKeep {
+            newDataByBlockNumber->Js.Dict.set(blockNumberKey, scannedBlock)
+            loop(idx + 1)
+          } else {
+            ()
+          }
+        }
+      | None => ()
       }
     }
-[…old lines 360–361 not captured…]
-  /**
-  Return a BlockNumbersAndHashes.t rolled back to where blockData is less
-  than the provided blockNumber
-  */
-  let rollBackToBlockNumberLt = (~blockNumber: int, self: t) => {
-    let {confirmedBlockThreshold, lastBlockScannedDataList} = self
-    lastBlockScannedDataList
-    ->rollBackToBlockNumberLtInternal(~blockNumber)
-    ->makeWithDataInternal(~confirmedBlockThreshold)
-  }
-[…not captured…]
-  type currentHeightAndLastBlockHashes = {
-    currentHeight: int,
-    lastBlockScannedHashes: t,
-  }
-[…not captured…]
-  /**
-  Find the the earliest block time across multiple instances of self where the block timestamp
-  falls within its own confirmed block threshold
+    loop(0)
 
-[…old lines 382–384 not captured…]
-  let getEarliestMultiChainTimestampInThreshold = (
-    multiSelf: array<currentHeightAndLastBlockHashes>,
-  ) => {
-    switch multiSelf {
-    | [_singleVal] =>
-      //In the case where there is only one chain, return none as there would be no need to aggregate
-      //or keep track of the lowest timestamp. The chain can purge as far back as its confirmed block range
-      None
-    | multiSelf =>
-      multiSelf
-      ->Belt.Array.keepMap(({currentHeight, lastBlockScannedHashes}) => {
-        lastBlockScannedHashes->getEarlistTimestampInThreshold(~currentHeight)
-      })
-      ->min
+    {
+      confirmedBlockThreshold,
+      dataByBlockNumber: newDataByBlockNumber,
     }
   }
 
   let getThresholdBlockNumbers = (self: t, ~currentBlockHeight) => {
-    let […]
-[…not captured…]
-    self.lastBlockScannedDataList->Belt.List.forEach(v => {
-      if v.blockNumber >= thresholdBlocknumber {
-        blockNumbers->Belt.Array.push(v.blockNumber)
-      }
-    })
-    blockNumbers
-  }
+    let dataByBlockNumberCopyInThreshold =
+      self->getDataByBlockNumberCopyInThreshold(~currentBlockHeight)
 
-[…not captured…]
-  Checks whether reorg has occured by comparing the parent hash with the last saved block hash.
-  */
-  let rec hasReorgOccurredInternal = (lastBlockScannedDataList, ~reorgGuard: reorgGuard) => {
-    switch lastBlockScannedDataList {
-    | list{head, ...tail} =>
-      switch reorgGuard {
-      | {lastBlockScannedData} if lastBlockScannedData.blockNumber == head.blockNumber =>
-        lastBlockScannedData.blockHash != head.blockHash
-      //If parentHash is None, either it's the genesis block (no reorg)
-      //Or its already confirmed so no Reorg
-      | {firstBlockParentNumberAndHash: None} => false
-      | {
-          firstBlockParentNumberAndHash: Some({
-            blockHash: parentHash,
-            blockNumber: parentBlockNumber,
-          }),
-        } =>
-        if parentBlockNumber == head.blockNumber {
-          parentHash != head.blockHash
-        } else {
-          //if block numbers do not match, this is a dynamic contract case and should recurse
-          //through the list to look for a matching block or nothing to validate
-          tail->hasReorgOccurredInternal(~reorgGuard)
-        }
-      }
-    //If recentLastBlockData is None, we have not yet saved blockData to compare against
-    | _ => false
-    }
+    dataByBlockNumberCopyInThreshold->Js.Dict.values->Js.Array2.map(v => v.blockNumber)
   }
-[…not captured…]
-  let hasReorgOccurred = (lastBlockScannedHashes: t, ~reorgGuard: reorgGuard) =>
-    lastBlockScannedHashes.lastBlockScannedDataList->hasReorgOccurredInternal(~reorgGuard)
 }
package/src/db/EntityHistory.res
CHANGED
@@ -231,10 +231,12 @@ let fromTable = (table: table, ~schema: S.t<'entity>): t<'entity> => {
   let dataFieldNames = dataFields->Belt.Array.map(field => field->getFieldName)
 
   let originTableName = table.tableName
+  let originSchemaName = table.schemaName
   let historyTableName = originTableName ++ "_history"
   //ignore composite indices
   let table = mkTable(
     historyTableName,
+    ~schemaName=originSchemaName,
     ~fields=Belt.Array.concatMany([
       currentHistoryFields,
       previousHistoryFields,
@@ -245,8 +247,8 @@ let fromTable = (table: table, ~schema: S.t<'entity>): t<'entity> => {
 
   let insertFnName = `"insert_${table.tableName}"`
   let historyRowArg = "history_row"
-  let historyTablePath = `"[…]
-  let originTablePath = `"[…]
+  let historyTablePath = `"${originSchemaName}"."${historyTableName}"`
+  let originTablePath = `"${originSchemaName}"."${originTableName}"`
 
   let previousHistoryFieldsAreNullStr =
     previousChangeFieldNames
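
The practical effect of threading schemaName through fromTable is that the generated history table, and the quoted paths used by its insert function, now target the same Postgres schema as the origin table. A tiny illustrative sketch of the path strings being composed; the schema and table names here are made up, the real ones come from the indexer's configuration and entity definitions:

// Illustrative only: how the quoted "schema"."table" paths above are built.
let originSchemaName = "public"
let originTableName = "Token"
let historyTableName = originTableName ++ "_history"

let originTablePath = `"${originSchemaName}"."${originTableName}"`
let historyTablePath = `"${originSchemaName}"."${historyTableName}"`

Js.log(originTablePath) // "public"."Token"
Js.log(historyTablePath) // "public"."Token_history"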
package/src/db/Table.res
CHANGED
@@ -86,16 +86,14 @@ let getFieldType = (field: field) => {
 
 type table = {
   tableName: string,
+  schemaName: string,
   fields: array<fieldOrDerived>,
   compositeIndices: array<array<string>>,
 }
 
-let mkTable[…]
-  ~compositeIndices: array<array<string>>=?,
-  ~fields: array<fieldOrDerived>,
-  string,
-) => 'c = (~compositeIndices=[], ~fields, tableName) => {
+let mkTable = (tableName, ~schemaName, ~compositeIndices=[], ~fields) => {
   tableName,
+  schemaName,
   fields,
   compositeIndices,
 }
@@ -213,10 +211,7 @@ let toSqlParams = (table: table, ~schema) => {
     }
   | Bool =>
     // Workaround for https://github.com/porsager/postgres/issues/471
-    S.union([
-      S.literal("t")->S.to(_ => true),
-      S.literal("f")->S.to(_ => false),
-    ])->S.toUnknown
+    S.union([S.literal(1)->S.to(_ => true), S.literal(0)->S.to(_ => false)])->S.toUnknown
   | _ => schema
   }
 
@@ -242,6 +237,7 @@ let toSqlParams = (table: table, ~schema) => {
     | Field(f) =>
      switch f.fieldType {
      | Custom(fieldType) => `${(Text :> string)}[]::${(fieldType :> string)}`
+     | Boolean => `${(Integer :> string)}[]::${(f.fieldType :> string)}`
      | fieldType => (fieldType :> string)
      }
    | DerivedFrom(_) => (Text :> string)
@@ -308,7 +304,7 @@ module PostgresInterop = {
     table->getNonDefaultFieldNames->Array.map(fieldName => `"${fieldName}"`)
   `(sql, rows) => {
     return sql\`
-      INSERT INTO "[…]
+      INSERT INTO "${table.schemaName}"."${table.tableName}"
      \${sql(rows, ${fieldNamesInQuotes->Js.Array2.joinWith(", ")})}
      ON CONFLICT(${table->getPrimaryKeyFieldNames->Js.Array2.joinWith(", ")}) DO UPDATE
      SET