envio 3.0.0-alpha.2 → 3.0.0-alpha.21
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +164 -30
- package/bin.mjs +49 -0
- package/evm.schema.json +79 -169
- package/fuel.schema.json +50 -21
- package/index.d.ts +578 -1
- package/index.js +4 -0
- package/package.json +47 -31
- package/rescript.json +4 -1
- package/src/Batch.res +11 -8
- package/src/Batch.res.mjs +11 -9
- package/src/ChainFetcher.res +531 -0
- package/src/ChainFetcher.res.mjs +339 -0
- package/src/ChainManager.res +190 -0
- package/src/ChainManager.res.mjs +166 -0
- package/src/Change.res +3 -3
- package/src/Config.gen.ts +19 -0
- package/src/Config.res +725 -25
- package/src/Config.res.mjs +692 -26
- package/src/{Indexer.res → Ctx.res} +1 -1
- package/src/Ecosystem.res +9 -124
- package/src/Ecosystem.res.mjs +19 -160
- package/src/Env.res +33 -73
- package/src/Env.res.mjs +29 -85
- package/src/Envio.gen.ts +3 -1
- package/src/Envio.res +77 -9
- package/src/Envio.res.mjs +39 -1
- package/src/EventConfigBuilder.res +408 -0
- package/src/EventConfigBuilder.res.mjs +376 -0
- package/src/EventProcessing.res +469 -0
- package/src/EventProcessing.res.mjs +337 -0
- package/src/EvmTypes.gen.ts +6 -0
- package/src/EvmTypes.res +1 -0
- package/src/FetchState.res +1256 -639
- package/src/FetchState.res.mjs +1135 -612
- package/src/GlobalState.res +1224 -0
- package/src/GlobalState.res.mjs +1291 -0
- package/src/GlobalStateManager.res +68 -0
- package/src/GlobalStateManager.res.mjs +75 -0
- package/src/GlobalStateManager.resi +7 -0
- package/src/HandlerLoader.res +89 -0
- package/src/HandlerLoader.res.mjs +79 -0
- package/src/HandlerRegister.res +357 -0
- package/src/HandlerRegister.res.mjs +299 -0
- package/src/HandlerRegister.resi +30 -0
- package/src/Hasura.res +111 -175
- package/src/Hasura.res.mjs +88 -150
- package/src/InMemoryStore.res +1 -1
- package/src/InMemoryStore.res.mjs +3 -3
- package/src/InMemoryTable.res +1 -1
- package/src/InMemoryTable.res.mjs +1 -1
- package/src/Internal.gen.ts +6 -0
- package/src/Internal.res +265 -12
- package/src/Internal.res.mjs +115 -1
- package/src/LoadLayer.res +444 -0
- package/src/LoadLayer.res.mjs +296 -0
- package/src/LoadLayer.resi +32 -0
- package/src/LogSelection.res +33 -27
- package/src/LogSelection.res.mjs +6 -0
- package/src/Logging.res +21 -7
- package/src/Logging.res.mjs +16 -8
- package/src/Main.res +390 -0
- package/src/Main.res.mjs +341 -0
- package/src/Persistence.res +7 -21
- package/src/Persistence.res.mjs +3 -3
- package/src/PgStorage.gen.ts +10 -0
- package/src/PgStorage.res +116 -69
- package/src/PgStorage.res.d.mts +5 -0
- package/src/PgStorage.res.mjs +93 -50
- package/src/Prometheus.res +294 -224
- package/src/Prometheus.res.mjs +353 -340
- package/src/ReorgDetection.res +6 -10
- package/src/ReorgDetection.res.mjs +6 -6
- package/src/SafeCheckpointTracking.res +4 -4
- package/src/SafeCheckpointTracking.res.mjs +2 -2
- package/src/SimulateItems.res +353 -0
- package/src/SimulateItems.res.mjs +335 -0
- package/src/Sink.res +4 -2
- package/src/Sink.res.mjs +2 -1
- package/src/TableIndices.res +0 -1
- package/src/TestIndexer.res +913 -0
- package/src/TestIndexer.res.mjs +698 -0
- package/src/TestIndexerProxyStorage.res +205 -0
- package/src/TestIndexerProxyStorage.res.mjs +151 -0
- package/src/TopicFilter.res +1 -1
- package/src/Types.ts +1 -1
- package/src/UserContext.res +424 -0
- package/src/UserContext.res.mjs +279 -0
- package/src/Utils.res +97 -26
- package/src/Utils.res.mjs +91 -44
- package/src/bindings/BigInt.res +10 -0
- package/src/bindings/BigInt.res.mjs +15 -0
- package/src/bindings/ClickHouse.res +120 -23
- package/src/bindings/ClickHouse.res.mjs +118 -28
- package/src/bindings/DateFns.res +74 -0
- package/src/bindings/DateFns.res.mjs +22 -0
- package/src/bindings/EventSource.res +11 -2
- package/src/bindings/EventSource.res.mjs +8 -1
- package/src/bindings/Express.res +1 -0
- package/src/bindings/Hrtime.res +14 -1
- package/src/bindings/Hrtime.res.mjs +22 -2
- package/src/bindings/Hrtime.resi +4 -0
- package/src/bindings/Lodash.res +0 -1
- package/src/bindings/NodeJs.res +49 -3
- package/src/bindings/NodeJs.res.mjs +11 -3
- package/src/bindings/Pino.res +24 -10
- package/src/bindings/Pino.res.mjs +14 -8
- package/src/bindings/Postgres.gen.ts +8 -0
- package/src/bindings/Postgres.res +5 -1
- package/src/bindings/Postgres.res.d.mts +5 -0
- package/src/bindings/PromClient.res +0 -10
- package/src/bindings/PromClient.res.mjs +0 -3
- package/src/bindings/Vitest.res +144 -0
- package/src/bindings/Vitest.res.mjs +9 -0
- package/src/bindings/WebSocket.res +27 -0
- package/src/bindings/WebSocket.res.mjs +2 -0
- package/src/bindings/Yargs.res +8 -0
- package/src/bindings/Yargs.res.mjs +2 -0
- package/src/db/EntityHistory.res +7 -7
- package/src/db/EntityHistory.res.mjs +9 -9
- package/src/db/InternalTable.res +59 -111
- package/src/db/InternalTable.res.mjs +73 -104
- package/src/db/Table.res +27 -8
- package/src/db/Table.res.mjs +25 -14
- package/src/sources/Evm.res +84 -0
- package/src/sources/Evm.res.mjs +105 -0
- package/src/sources/EvmChain.res +94 -0
- package/src/sources/EvmChain.res.mjs +60 -0
- package/src/sources/Fuel.res +19 -34
- package/src/sources/Fuel.res.mjs +34 -16
- package/src/sources/FuelSDK.res +38 -0
- package/src/sources/FuelSDK.res.mjs +29 -0
- package/src/sources/HyperFuel.res +2 -2
- package/src/sources/HyperFuel.resi +1 -1
- package/src/sources/HyperFuelClient.res +2 -2
- package/src/sources/HyperFuelSource.res +35 -13
- package/src/sources/HyperFuelSource.res.mjs +26 -16
- package/src/sources/HyperSync.res +61 -60
- package/src/sources/HyperSync.res.mjs +53 -67
- package/src/sources/HyperSync.resi +6 -4
- package/src/sources/HyperSyncClient.res +29 -2
- package/src/sources/HyperSyncClient.res.mjs +9 -0
- package/src/sources/HyperSyncHeightStream.res +76 -118
- package/src/sources/HyperSyncHeightStream.res.mjs +68 -75
- package/src/sources/HyperSyncSource.res +122 -143
- package/src/sources/HyperSyncSource.res.mjs +106 -121
- package/src/sources/Rpc.res +86 -14
- package/src/sources/Rpc.res.mjs +101 -9
- package/src/sources/RpcSource.res +731 -364
- package/src/sources/RpcSource.res.mjs +845 -410
- package/src/sources/RpcWebSocketHeightStream.res +181 -0
- package/src/sources/RpcWebSocketHeightStream.res.mjs +196 -0
- package/src/sources/SimulateSource.res +59 -0
- package/src/sources/SimulateSource.res.mjs +50 -0
- package/src/sources/Source.res +7 -5
- package/src/sources/SourceManager.res +358 -221
- package/src/sources/SourceManager.res.mjs +346 -171
- package/src/sources/SourceManager.resi +17 -6
- package/src/sources/Svm.res +81 -0
- package/src/sources/Svm.res.mjs +90 -0
- package/src/tui/Tui.res +247 -0
- package/src/tui/Tui.res.mjs +337 -0
- package/src/tui/bindings/Ink.res +371 -0
- package/src/tui/bindings/Ink.res.mjs +72 -0
- package/src/tui/bindings/Style.res +123 -0
- package/src/tui/bindings/Style.res.mjs +2 -0
- package/src/tui/components/BufferedProgressBar.res +40 -0
- package/src/tui/components/BufferedProgressBar.res.mjs +57 -0
- package/src/tui/components/CustomHooks.res +122 -0
- package/src/tui/components/CustomHooks.res.mjs +179 -0
- package/src/tui/components/Messages.res +41 -0
- package/src/tui/components/Messages.res.mjs +75 -0
- package/src/tui/components/SyncETA.res +174 -0
- package/src/tui/components/SyncETA.res.mjs +263 -0
- package/src/tui/components/TuiData.res +47 -0
- package/src/tui/components/TuiData.res.mjs +34 -0
- package/svm.schema.json +112 -0
- package/bin.js +0 -48
- package/src/EventRegister.res +0 -241
- package/src/EventRegister.res.mjs +0 -240
- package/src/EventRegister.resi +0 -30
- package/src/bindings/Ethers.gen.ts +0 -14
- package/src/bindings/Ethers.res +0 -204
- package/src/bindings/Ethers.res.mjs +0 -130
- package/src/{Indexer.res.mjs → Ctx.res.mjs} +0 -0
package/src/FetchState.res
CHANGED
@@ -11,7 +11,15 @@ type blockNumberAndLogIndex = {blockNumber: int, logIndex: int}
 
 type selection = {eventConfigs: array<Internal.eventConfig>, dependsOnAddresses: bool}
 
-type
+type pendingQuery = {
+  fromBlock: int,
+  toBlock: option<int>,
+  isChunk: bool,
+  // Stores latestFetchedBlock when query completes. Only needed to persist
+  // timestamp while earlier queries are still pending before updating
+  // the partition's latestFetchedBlock.
+  mutable fetchedBlock: option<blockNumberAndTimestamp>,
+}
 
 /**
 A state that holds a queue of events and data regarding what to fetch next
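The `fetchedBlock` field above is what lets responses land out of order: a query that finishes while an earlier query on the same partition is still in flight parks its result there, and the partition pointer only advances once the queue front is both fetched and contiguous. A minimal standalone ReScript sketch of that consume step (illustrative names, not the package's API):

    type fetched = {blockNumber: int}
    type pending = {fromBlock: int, mutable fetchedBlock: option<fetched>}

    // Pop contiguous, already-fetched queries off the queue front and
    // return how far the partition pointer may advance.
    let advance = (queue: array<pending>, ~from: int) => {
      let latest = ref(from)
      let stop = ref(false)
      while !stop.contents {
        switch queue->Belt.Array.get(0) {
        | Some({fetchedBlock: Some(f), fromBlock}) if fromBlock <= latest.contents + 1 =>
          latest := f.blockNumber
          queue->Js.Array2.shift->ignore
        | _ => stop := true
        }
      }
      latest.contents
    }

With a queue of [{fromBlock: 11, fetchedBlock: None}, {fromBlock: 21, fetchedBlock: Some({blockNumber: 30})}] and ~from=10, advance returns 10: the later result stays parked until blocks 11-20 arrive.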
@@ -21,20 +29,450 @@ the are getting merged until the maxAddrInPartition is reached.
 */
 type partition = {
   id: string,
-
+  // The block number of the latest fetched query
+  // which added all its events to the queue
   latestFetchedBlock: blockNumberAndTimestamp,
   selection: selection,
   addressesByContractName: dict<array<Address.t>>,
+  mergeBlock: option<int>,
+  // When set, partition indexes a single dynamic contract type.
+  // The addressesByContractName must contain only addresses for this contract.
+  dynamicContract: option<string>,
+  // Mutable array for SourceManager sync - queries exist only while being fetched
+  mutPendingQueries: array<pendingQuery>,
+  // Track last 3 successful query ranges for chunking heuristic (0 means no data)
+  prevQueryRange: int,
+  prevPrevQueryRange: int,
+  // Tracks the latestFetchedBlock.blockNumber of the most recent response
+  // that updated prevQueryRange. Prevents degradation of the chunking heuristic
+  // when parallel query responses arrive out of order.
+  latestBlockRangeUpdateBlock: int,
+}
+
+type query = {
+  partitionId: string,
+  fromBlock: int,
+  toBlock: option<int>,
+  isChunk: bool,
+  selection: selection,
+  addressesByContractName: dict<array<Address.t>>,
+  indexingContracts: dict<Internal.indexingContract>,
+}
+
+// Calculate the chunk range from history using min-of-last-3-ranges heuristic
+let getMinHistoryRange = (p: partition) => {
+  switch (p.prevQueryRange, p.prevPrevQueryRange) {
+  | (0, _) | (_, 0) => None
+  | (a, b) => Some(a < b ? a : b)
+  }
+}
+
+let getMinQueryRange = (partitions: array<partition>) => {
+  let min = ref(0)
+  for i in 0 to partitions->Array.length - 1 {
+    let p = partitions->Js.Array2.unsafe_get(i)
+    let a = p.prevQueryRange
+    let b = p.prevPrevQueryRange
+    if a > 0 && (min.contents == 0 || a < min.contents) {
+      min := a
+    }
+    if b > 0 && (min.contents == 0 || b < min.contents) {
+      min := b
+    }
+  }
+  min.contents
+}
+
+module OptimizedPartitions = {
+  type t = {
+    idsInAscOrder: array<string>,
+    entities: dict<partition>, // hello redux-toolkit :)
+    // Used for the incremental partition id. Can't use the partitions length,
+    // since partitions might be deleted on merge or cleaned up
+    maxAddrInPartition: int,
+    nextPartitionIndex: int,
+    // Tracks all contract names that have been dynamically added.
+    // Never reset - used to determine when to split existing partitions.
+    dynamicContracts: Utils.Set.t<string>,
+  }
+
+  @inline
+  let count = (optimizedPartitions: t) => optimizedPartitions.idsInAscOrder->Array.length
+
+  @inline
+  let getOrThrow = (optimizedPartitions: t, ~partitionId) => {
+    switch optimizedPartitions.entities->Js.Dict.get(partitionId) {
+    | Some(p) => p
+    | None => Js.Exn.raiseError(`Unexpected case: Couldn't find partition ${partitionId}`)
+    }
+  }
+
+  // Merges two partitions at a given potentialMergeBlock.
+  // Returns array<partition> where the last element is the continuing partition
+  // and all preceding elements are completed (have mergeBlock set).
+  // Handles address overflow splitting inline.
+  let mergePartitionsAtBlock = (
+    ~p1: partition,
+    ~p2: partition,
+    ~potentialMergeBlock: int,
+    ~contractName: string,
+    ~maxAddrInPartition: int,
+    ~nextPartitionIndexRef: ref<int>,
+  ) => {
+    let combinedAddresses =
+      p1.addressesByContractName
+      ->Js.Dict.unsafeGet(contractName)
+      ->Js.Array2.concat(p2.addressesByContractName->Js.Dict.unsafeGet(contractName))
+
+    let p1Below = p1.latestFetchedBlock.blockNumber < potentialMergeBlock
+    let p2Below = p2.latestFetchedBlock.blockNumber < potentialMergeBlock
+
+    // Build the continuing partition (at potentialMergeBlock with combined addresses),
+    // collecting completed partitions (with mergeBlock) along the way
+    let completed = []
+    let continuingBase = switch (p1Below, p2Below) {
+    | (false, false) => p1
+    | (false, true) =>
+      completed->Js.Array2.push({...p2, mergeBlock: Some(potentialMergeBlock)})->ignore
+      p1
+    | (true, false) =>
+      completed->Js.Array2.push({...p1, mergeBlock: Some(potentialMergeBlock)})->ignore
+      p2
+    | (true, true) =>
+      completed->Js.Array2.push({...p1, mergeBlock: Some(potentialMergeBlock)})->ignore
+      completed->Js.Array2.push({...p2, mergeBlock: Some(potentialMergeBlock)})->ignore
+      let newId = nextPartitionIndexRef.contents->Js.Int.toString
+      nextPartitionIndexRef := nextPartitionIndexRef.contents + 1
+      let minRange = getMinQueryRange([p1, p2])
+      {
+        id: newId,
+        dynamicContract: Some(contractName),
+        selection: p1.selection,
+        latestFetchedBlock: {blockNumber: potentialMergeBlock, blockTimestamp: 0},
+        mergeBlock: None,
+        addressesByContractName: Js.Dict.empty(), // set below
+        mutPendingQueries: [],
+        prevQueryRange: minRange,
+        prevPrevQueryRange: minRange,
+        latestBlockRangeUpdateBlock: 0,
+      }
+    }
+
+    // Apply address split on the continuing partition
+    if combinedAddresses->Js.Array2.length > maxAddrInPartition {
+      let addressesFull = combinedAddresses->Js.Array2.slice(~start=0, ~end_=maxAddrInPartition)
+      let addressesRest = combinedAddresses->Js.Array2.sliceFrom(maxAddrInPartition)
+      let abcFull = Js.Dict.empty()
+      abcFull->Js.Dict.set(contractName, addressesFull)
+      let abcRest = Js.Dict.empty()
+      abcRest->Js.Dict.set(contractName, addressesRest)
+      completed->Js.Array2.push({...continuingBase, addressesByContractName: abcFull})->ignore
+      let restId = nextPartitionIndexRef.contents->Js.Int.toString
+      nextPartitionIndexRef := nextPartitionIndexRef.contents + 1
+      completed
+      ->Js.Array2.push({
+        ...continuingBase,
+        id: restId,
+        addressesByContractName: abcRest,
+        mutPendingQueries: [],
+      })
+      ->ignore
+      completed
+    } else {
+      let abc = Js.Dict.empty()
+      abc->Js.Dict.set(contractName, combinedAddresses)
+      completed->Js.Array2.push({...continuingBase, addressesByContractName: abc})->ignore
+      completed
+    }
+  }
+
+  // Random number from my head
+  // Not super critical if it's too big or too small
+  // We optimize for fastest data which we get in any case.
+  // If the value is off, it'll only result in
+  // quering the same block range multiple times
+  let tooFarBlockRange = 20_000
+
+  let ascSortFn = (a, b) => a.latestFetchedBlock.blockNumber - b.latestFetchedBlock.blockNumber
+
+  /**
+   * Optimizes partitions by finding opportunities to merge partitions that
+   * are behind other partitions with same/superset of contract names.
+   *
+   * Only partitions with dynamicContract set are eligible for optimization.
+   * This way we don't have optimization overhead when partitions are stable.
+   */
+  let make = (
+    ~partitions: array<partition>,
+    ~maxAddrInPartition,
+    ~nextPartitionIndex: int,
+    ~dynamicContracts: Utils.Set.t<string>,
+  ) => {
+    let newPartitions = []
+    let mergingPartitions = Js.Dict.empty()
+    let nextPartitionIndexRef = ref(nextPartitionIndex)
+
+    for idx in 0 to partitions->Array.length - 1 {
+      let p = partitions->Js.Array2.unsafe_get(idx)
+      switch p {
+      // Since it's not a dynamic contract partition,
+      // there's no need for merge logic
+      | {dynamicContract: None}
+      | // Wildcard doesn't need merging
+      {selection: {dependsOnAddresses: false}}
+      | // For now don't merge partitions with mergeBlock,
+      // assuming they are already merged,
+      // TODO: Although there might be cases with too far away mergeBlock,
+      // which is worth merging
+      {mergeBlock: Some(_)} =>
+        newPartitions->Js.Array2.push(p)->ignore
+      | {dynamicContract: Some(contractName)} =>
+        let pAddressesCount =
+          p.addressesByContractName->Js.Dict.unsafeGet(contractName)->Js.Array2.length
+        // Compute merge block: last pending query's toBlock, or lfb if idle
+        let potentialMergeBlock = switch p.mutPendingQueries->Utils.Array.last {
+        | Some({isChunk: true, toBlock: Some(toBlock)}) => Some(toBlock)
+        | Some(_) => None // unbounded query -- can't merge
+        | None => Some(p.latestFetchedBlock.blockNumber)
+        }
+        switch potentialMergeBlock {
+        | None => newPartitions->Js.Array2.push(p)->ignore
+        | Some(potentialMergeBlock) =>
+          if pAddressesCount >= maxAddrInPartition {
+            newPartitions->Js.Array2.push(p)->ignore
+          } else {
+            let partitionsByMergeBlock =
+              mergingPartitions->Utils.Dict.getOrInsertEmptyDict(contractName)
+            switch partitionsByMergeBlock->Utils.Dict.dangerouslyGetByIntNonOption(
+              potentialMergeBlock,
+            ) {
+            | Some(existingPartition) =>
+              let result = mergePartitionsAtBlock(
+                ~p1=existingPartition,
+                ~p2=p,
+                ~potentialMergeBlock,
+                ~contractName,
+                ~maxAddrInPartition,
+                ~nextPartitionIndexRef,
+              )
+              for i in 0 to result->Array.length - 2 {
+                newPartitions->Js.Array2.push(result->Js.Array2.unsafe_get(i))->ignore
+              }
+              partitionsByMergeBlock->Utils.Dict.setByInt(
+                potentialMergeBlock,
+                result->Utils.Array.lastUnsafe,
+              )
+            | None => partitionsByMergeBlock->Utils.Dict.setByInt(potentialMergeBlock, p)
+            }
+          }
+        }
+      }
+    }
+
+    let merginDynamicContracts = mergingPartitions->Js.Dict.keys
+    for idx in 0 to merginDynamicContracts->Array.length - 1 {
+      let contractName = merginDynamicContracts->Js.Array2.unsafe_get(idx)
+      let partitionsByMergeBlock = mergingPartitions->Js.Dict.unsafeGet(contractName)
+      // JS engine automatically sorts number keys in objects
+      let ascPartitionKeys = partitionsByMergeBlock->Js.Dict.keys
+
+      // But -1 is placed last...
+      if ascPartitionKeys->Js.Array2.unsafe_get(ascPartitionKeys->Array.length - 1) === "-1" {
+        ascPartitionKeys
+        ->Js.Array2.unshift(ascPartitionKeys->Js.Array2.pop->Option.getUnsafe)
+        ->ignore
+      }
+      let currentPRef = ref(
+        partitionsByMergeBlock->Js.Dict.unsafeGet(ascPartitionKeys->Utils.Array.firstUnsafe),
+      )
+      let currentPMergeBlockRef = ref(
+        ascPartitionKeys->Utils.Array.firstUnsafe->Int.fromString->Option.getUnsafe,
+      )
+      let nextJdx = ref(1)
+      while nextJdx.contents < ascPartitionKeys->Array.length {
+        let nextKey = ascPartitionKeys->Js.Array2.unsafe_get(nextJdx.contents)
+        let currentP = currentPRef.contents
+        let nextP = partitionsByMergeBlock->Js.Dict.unsafeGet(nextKey)
+        let nextPMergeBlock = nextKey->Int.fromString->Option.getUnsafe
+        let currentPMergeBlock = currentPMergeBlockRef.contents
+
+        let isTooFar = currentPMergeBlock + tooFarBlockRange < nextPMergeBlock
+        if isTooFar {
+          newPartitions->Js.Array2.push(currentP)->ignore
+          currentPRef := nextP
+          currentPMergeBlockRef := nextPMergeBlock
+        } else {
+          let result = mergePartitionsAtBlock(
+            ~p1=nextP,
+            ~p2=currentP,
+            ~potentialMergeBlock=nextPMergeBlock,
+            ~contractName,
+            ~maxAddrInPartition,
+            ~nextPartitionIndexRef,
+          )
+          for i in 0 to result->Array.length - 2 {
+            newPartitions->Js.Array2.push(result->Js.Array2.unsafe_get(i))->ignore
+          }
+          currentPRef := result->Utils.Array.lastUnsafe
+          currentPMergeBlockRef := nextPMergeBlock
+        }
+
+        nextJdx := nextJdx.contents + 1
+      }
+
+      newPartitions->Js.Array2.push(currentPRef.contents)->ignore
+    }
+
+    // Sort partitions by latestFetchedBlock ascending
+    let _ = newPartitions->Js.Array2.sortInPlaceWith(ascSortFn)
+
+    let partitionsCount = newPartitions->Array.length
+    let idsInAscOrder = Belt.Array.makeUninitializedUnsafe(partitionsCount)
+    let entities = Js.Dict.empty()
+    for idx in 0 to partitionsCount - 1 {
+      let p = newPartitions->Js.Array2.unsafe_get(idx)
+      idsInAscOrder->Js.Array2.unsafe_set(idx, p.id)
+      entities->Js.Dict.set(p.id, p)
+    }
+
+    {
+      idsInAscOrder,
+      entities,
+      maxAddrInPartition,
+      nextPartitionIndex: nextPartitionIndexRef.contents,
+      dynamicContracts,
+    }
+  }
+
+  // Helper to process fetched queries from the front of the queue
+  // Removes consecutive fetched queries and returns the last fetchedBlock.
+  // Stops if the next query's fromBlock is not contiguous with the current
+  // latestFetchedBlock (gap from a partial chunk fetch).
+  @inline
+  let consumeFetchedQueries = (
+    mutPendingQueries: array<pendingQuery>,
+    ~initialLatestFetchedBlock: blockNumberAndTimestamp,
+  ) => {
+    let latestFetchedBlock = ref(initialLatestFetchedBlock)
+
+    while (
+      mutPendingQueries->Array.length > 0 && {
+        let pq = mutPendingQueries->Utils.Array.firstUnsafe
+        pq.fetchedBlock !== None && pq.fromBlock <= latestFetchedBlock.contents.blockNumber + 1
+      }
+    ) {
+      let removedQuery = mutPendingQueries->Js.Array2.shift->Option.getUnsafe
+      latestFetchedBlock := removedQuery.fetchedBlock->Option.getUnsafe
+    }
+
+    latestFetchedBlock.contents
+  }
+
+  let getPendingQueryOrThrow = (p: partition, ~fromBlock) => {
+    let idxRef = ref(0)
+    let pendingQueryRef = ref(None)
+    while idxRef.contents < p.mutPendingQueries->Array.length && pendingQueryRef.contents === None {
+      let pq = p.mutPendingQueries->Js.Array2.unsafe_get(idxRef.contents)
+      if pq.fromBlock === fromBlock {
+        pendingQueryRef := Some(pq)
+      }
+      idxRef := idxRef.contents + 1
+    }
+    switch pendingQueryRef.contents {
+    | Some(pq) => pq
+    | None =>
+      Js.Exn.raiseError(
+        `Pending query not found for partition ${p.id} fromBlock ${fromBlock->Int.toString}`,
+      )
+    }
+  }
+
+  let handleQueryResponse = (
+    optimizedPartitions: t,
+    ~query,
+    ~knownHeight,
+    ~latestFetchedBlock: blockNumberAndTimestamp,
+  ) => {
+    let p = optimizedPartitions->getOrThrow(~partitionId=query.partitionId)
+    let mutEntities = optimizedPartitions.entities->Utils.Dict.shallowCopy
+
+    // Mark query as fetched
+    let pendingQuery = getPendingQueryOrThrow(p, ~fromBlock=query.fromBlock)
+    pendingQuery.fetchedBlock = Some(latestFetchedBlock)
+
+    let blockRange = latestFetchedBlock.blockNumber - query.fromBlock + 1
+    // Skip updating block range if a later response already updated it.
+    // Prevents degradation of the chunking heuristic when parallel query
+    // responses arrive out of order (e.g. earlier query with smaller range
+    // arriving after a later query with bigger range).
+    let shouldUpdateBlockRange =
+      latestFetchedBlock.blockNumber > p.latestBlockRangeUpdateBlock &&
+      switch query.toBlock {
+      | None => latestFetchedBlock.blockNumber < knownHeight - 10 // Don't update block range when very close to the head
+      | Some(queryToBlock) =>
+        // Update on partial response (direct capacity evidence),
+        // or when the query's intended range covers at least the partition's
+        // current chunk range — meaning it was a capacity-based split chunk,
+        // not a small gap-fill whose toBlock is an artificial boundary.
+        latestFetchedBlock.blockNumber < queryToBlock ||
+        switch getMinHistoryRange(p) {
+        | None => false // Chunking not active yet, don't update
+        | Some(minHistoryRange) => queryToBlock - query.fromBlock + 1 >= minHistoryRange
+        }
+      }
+    let updatedPrevQueryRange = shouldUpdateBlockRange ? blockRange : p.prevQueryRange
+    let updatedPrevPrevQueryRange = shouldUpdateBlockRange ? p.prevQueryRange : p.prevPrevQueryRange
+
+    // Process fetched queries from front of queue for main partition
+    let updatedLatestFetchedBlock = consumeFetchedQueries(
+      p.mutPendingQueries,
+      ~initialLatestFetchedBlock=p.latestFetchedBlock,
+    )
+
+    // Check if partition reached its mergeBlock and should be removed
+    let partitionReachedMergeBlock = switch p.mergeBlock {
+    | Some(mergeBlock) => updatedLatestFetchedBlock.blockNumber >= mergeBlock
+    | None => false
+    }
+
+    if partitionReachedMergeBlock {
+      mutEntities->Utils.Dict.deleteInPlace(p.id)
+    } else {
+      let updatedMainPartition = {
+        ...p,
+        latestFetchedBlock: updatedLatestFetchedBlock,
+        prevQueryRange: updatedPrevQueryRange,
+        prevPrevQueryRange: updatedPrevPrevQueryRange,
+        latestBlockRangeUpdateBlock: shouldUpdateBlockRange
+          ? latestFetchedBlock.blockNumber
+          : p.latestBlockRangeUpdateBlock,
+      }
+
+      mutEntities->Js.Dict.set(p.id, updatedMainPartition)
+    }
+
+    // Re-optimize to maintain sorted order and apply optimizations
+    make(
+      ~partitions=mutEntities->Js.Dict.values,
+      ~maxAddrInPartition=optimizedPartitions.maxAddrInPartition,
+      ~nextPartitionIndex=optimizedPartitions.nextPartitionIndex,
+      ~dynamicContracts=optimizedPartitions.dynamicContracts,
+    )
+  }
+
+  @inline
+  let getLatestFullyFetchedBlock = (optimizedPartitions: t) => {
+    switch optimizedPartitions.idsInAscOrder->Array.get(0) {
+    | Some(id) => Some((optimizedPartitions.entities->Js.Dict.unsafeGet(id)).latestFetchedBlock)
+    | None => None
+    }
+  }
 }
 
 type t = {
-
-  // Used for the incremental partition id. Can't use the partitions length,
-  // since partitions might be deleted on merge or cleaned up
-  nextPartitionIndex: int,
+  optimizedPartitions: OptimizedPartitions.t,
   startBlock: int,
   endBlock: option<int>,
-  maxAddrInPartition: int,
   normalSelection: selection,
   // By address
   indexingContracts: dict<Internal.indexingContract>,
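For intuition about the prevQueryRange / prevPrevQueryRange pair above: together with the incoming response they act as a min-over-last-three cap on the next chunk size, and a 0 keeps chunking off until enough history exists. A standalone ReScript sketch (illustrative, not the package's API):

    // The heuristic keeps the two most recently accepted ranges; combined
    // with the incoming response that is a min over the last three.
    let minHistoryRange = (~prev, ~prevPrev) =>
      switch (prev, prevPrev) {
      | (0, _) | (_, 0) => None // not enough history - chunking stays off
      | (a, b) => Some(a < b ? a : b)
      }

    // Responses that covered 4_000 then 2_500 blocks cap the next chunk at 2_500:
    let _ = minHistoryRange(~prev=2_500, ~prevPrev=4_000) // Some(2500)
    let _ = minHistoryRange(~prev=2_500, ~prevPrev=0) // None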
@@ -42,9 +480,6 @@ type t = {
   contractConfigs: dict<contractConfig>,
   // Not used for logic - only metadata
   chainId: int,
-  // The block number of the latest block fetched
-  // which added all its events to the queue
-  latestFullyFetchedBlock: blockNumberAndTimestamp,
   // The block number of the latest block which was added to the queue
   // by the onBlock configs
   // Need a separate pointer for this
@@ -59,104 +494,39 @@ type t = {
   // ready for processing
   targetBufferSize: int,
   onBlockConfigs: array<Internal.onBlockConfig>,
-
-
-let mergeIntoPartition = (p: partition, ~target: partition, ~maxAddrInPartition) => {
-  switch (p, target) {
-  | ({selection: {dependsOnAddresses: true}}, {selection: {dependsOnAddresses: true}}) => {
-      let latestFetchedBlock = target.latestFetchedBlock
-
-      let mergedAddresses = Js.Dict.empty()
-
-      let allowedAddressesNumber = ref(maxAddrInPartition)
-
-      target.addressesByContractName->Utils.Dict.forEachWithKey((addresses, contractName) => {
-        allowedAddressesNumber := allowedAddressesNumber.contents - addresses->Array.length
-        mergedAddresses->Js.Dict.set(contractName, addresses)
-      })
-
-      // Start with putting all addresses to the merging dict
-      // And if they exceed the limit, start removing from the merging dict
-      // and putting into the rest dict
-      p.addressesByContractName->Utils.Dict.forEachWithKey((addresses, contractName) => {
-        allowedAddressesNumber := allowedAddressesNumber.contents - addresses->Array.length
-        switch mergedAddresses->Utils.Dict.dangerouslyGetNonOption(contractName) {
-        | Some(targetAddresses) =>
-          mergedAddresses->Js.Dict.set(contractName, Array.concat(targetAddresses, addresses))
-        | None => mergedAddresses->Js.Dict.set(contractName, addresses)
-        }
-      })
-
-      let rest = if allowedAddressesNumber.contents < 0 {
-        let restAddresses = Js.Dict.empty()
-
-        mergedAddresses->Utils.Dict.forEachWithKey((addresses, contractName) => {
-          if allowedAddressesNumber.contents === 0 {
-            ()
-          } else if addresses->Array.length <= -allowedAddressesNumber.contents {
-            allowedAddressesNumber := allowedAddressesNumber.contents + addresses->Array.length
-            mergedAddresses->Utils.Dict.deleteInPlace(contractName)
-            restAddresses->Js.Dict.set(contractName, addresses)
-          } else {
-            let restFrom = addresses->Array.length + allowedAddressesNumber.contents
-            mergedAddresses->Js.Dict.set(
-              contractName,
-              addresses->Js.Array2.slice(~start=0, ~end_=restFrom),
-            )
-            restAddresses->Js.Dict.set(contractName, addresses->Js.Array2.sliceFrom(restFrom))
-            allowedAddressesNumber := 0
-          }
-        })
-
-        Some({
-          id: p.id,
-          status: {
-            fetchingStateId: None,
-          },
-          selection: target.selection,
-          addressesByContractName: restAddresses,
-          latestFetchedBlock,
-        })
-      } else {
-        None
-      }
-
-      (
-        {
-          id: target.id,
-          status: {
-            fetchingStateId: None,
-          },
-          selection: target.selection,
-          addressesByContractName: mergedAddresses,
-          latestFetchedBlock,
-        },
-        rest,
-      )
-    }
-  | ({selection: {dependsOnAddresses: false}}, _)
-  | (_, {selection: {dependsOnAddresses: false}}) => (p, Some(target))
-  }
+  knownHeight: int,
+  firstEventBlock: option<int>,
 }
 
 @inline
-let bufferBlockNumber = ({
-
-
-
+let bufferBlockNumber = ({latestOnBlockBlockNumber, optimizedPartitions}: t) => {
+  switch optimizedPartitions->OptimizedPartitions.getLatestFullyFetchedBlock {
+  | None => latestOnBlockBlockNumber
+  | Some(latestFullyFetchedBlock) =>
+    latestOnBlockBlockNumber < latestFullyFetchedBlock.blockNumber
+      ? latestOnBlockBlockNumber
+      : latestFullyFetchedBlock.blockNumber
+  }
 }
 
 /**
 * Returns the latest block which is ready to be consumed
 */
 @inline
-let bufferBlock = ({
-
-
-
-
-
-
+let bufferBlock = ({optimizedPartitions, latestOnBlockBlockNumber}: t) => {
+  switch optimizedPartitions->OptimizedPartitions.getLatestFullyFetchedBlock {
+  | None => {
+      blockNumber: latestOnBlockBlockNumber,
+      blockTimestamp: 0,
+    }
+  | Some(latestFullyFetchedBlock) =>
+    latestOnBlockBlockNumber < latestFullyFetchedBlock.blockNumber
+      ? {
+          blockNumber: latestOnBlockBlockNumber,
+          blockTimestamp: 0,
+        }
+      : latestFullyFetchedBlock
+  }
 }
 
 /*
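The rewritten bufferBlockNumber above reduces to taking the slower of two progress pointers: onBlock item creation versus the least-advanced partition. A standalone ReScript sketch of that rule (illustrative names, not the package's API):

    // The consumable block is the minimum of the two pointers; with no
    // partitions at all, onBlock progress alone decides.
    let bufferBlockNumber = (~latestOnBlock: int, ~latestFullyFetched: option<int>) =>
      switch latestFullyFetched {
      | None => latestOnBlock
      | Some(fetched) => latestOnBlock < fetched ? latestOnBlock : fetched
      }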
@@ -174,31 +544,24 @@ let compareBufferItem = (a: Internal.item, b: Internal.item) => {
 // Some big number which should be bigger than any log index
 let blockItemLogIndex = 16777216
 
+let numAddresses = fetchState => fetchState.indexingContracts->Js.Dict.keys->Array.length
+
 /*
-
-
+Update fetchState, merge registers and recompute derived values.
+Runs partition optimization when partitions change.
+*/
 let updateInternal = (
   fetchState: t,
-  ~
-  ~nextPartitionIndex=fetchState.nextPartitionIndex,
+  ~optimizedPartitions=fetchState.optimizedPartitions,
   ~indexingContracts=fetchState.indexingContracts,
   ~mutItems=?,
   ~blockLag=fetchState.blockLag,
+  ~knownHeight=fetchState.knownHeight,
 ): t => {
-  let firstPartition = partitions->Js.Array2.unsafe_get(0)
-  let latestFullyFetchedBlock = ref(firstPartition.latestFetchedBlock)
-  for idx in 0 to partitions->Array.length - 1 {
-    let p = partitions->Js.Array2.unsafe_get(idx)
-    if latestFullyFetchedBlock.contents.blockNumber > p.latestFetchedBlock.blockNumber {
-      latestFullyFetchedBlock := p.latestFetchedBlock
-    }
-  }
-  let latestFullyFetchedBlock = latestFullyFetchedBlock.contents
-
   let mutItemsRef = ref(mutItems)
 
   let latestOnBlockBlockNumber = switch fetchState.onBlockConfigs {
-  | [] =>
+  | [] => knownHeight
   | onBlockConfigs => {
       // Calculate the max block number we are going to create items for
       // Use targetBufferSize to get the last target item in the buffer
@@ -212,7 +575,11 @@ let updateInternal = (
     | None => fetchState.buffer
     }->Belt.Array.get(fetchState.targetBufferSize - 1) {
     | Some(item) => item->Internal.getItemBlockNumber
-    | None =>
+    | None =>
+      switch optimizedPartitions->OptimizedPartitions.getLatestFullyFetchedBlock {
+      | None => knownHeight
+      | Some(latestFullyFetchedBlock) => latestFullyFetchedBlock.blockNumber
+      }
     }
 
   let mutItems = switch mutItemsRef.contents {
@@ -268,7 +635,6 @@ let updateInternal = (
   }
 
   let updatedFetchState = {
-    maxAddrInPartition: fetchState.maxAddrInPartition,
     startBlock: fetchState.startBlock,
     endBlock: fetchState.endBlock,
     contractConfigs: fetchState.contractConfigs,
@@ -276,12 +642,11 @@ let updateInternal = (
     chainId: fetchState.chainId,
     onBlockConfigs: fetchState.onBlockConfigs,
     targetBufferSize: fetchState.targetBufferSize,
-
-    partitions,
+    optimizedPartitions,
     latestOnBlockBlockNumber,
-    latestFullyFetchedBlock,
    indexingContracts,
     blockLag,
+    knownHeight,
     buffer: switch mutItemsRef.contents {
     // Theoretically it could be faster to asume that
     // the items are sorted, but there are cases
@@ -289,10 +654,11 @@ let updateInternal = (
     | Some(mutItems) => mutItems->Js.Array2.sortInPlaceWith(compareBufferItem)
     | None => fetchState.buffer
     },
+    firstEventBlock: fetchState.firstEventBlock,
   }
 
   Prometheus.IndexingPartitions.set(
-    ~partitionsCount=
+    ~partitionsCount=optimizedPartitions->OptimizedPartitions.count,
     ~chainId=fetchState.chainId,
   )
   Prometheus.IndexingBufferSize.set(
@@ -303,12 +669,16 @@ let updateInternal = (
     ~blockNumber=updatedFetchState->bufferBlockNumber,
     ~chainId=fetchState.chainId,
   )
+  if indexingContracts !== fetchState.indexingContracts {
+    Prometheus.IndexingAddresses.set(
+      ~addressesCount=updatedFetchState->numAddresses,
+      ~chainId=fetchState.chainId,
+    )
+  }
 
   updatedFetchState
 }
 
-let numAddresses = fetchState => fetchState.indexingContracts->Js.Dict.keys->Array.length
-
 let warnDifferentContractType = (
   fetchState,
   ~existingContract: Internal.indexingContract,
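The new guard around Prometheus.IndexingAddresses.set relies on updateInternal never mutating indexingContracts in place, so reference inequality is a sufficient "changed" check. A minimal ReScript sketch of the pattern (the set callback is hypothetical, not the package's API):

    // Immutable-update discipline makes a physical comparison enough to
    // detect a change and skip redundant gauge writes.
    let maybeSetAddressesGauge = (~old: dict<int>, ~next: dict<int>, ~set: int => unit) =>
      if next !== old {
        set(next->Js.Dict.keys->Js.Array2.length)
      }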
@@ -325,6 +695,234 @@ let warnDifferentContractType = (
   logger->Logging.childWarn(`Skipping contract registration: Contract address is already registered for one contract and cannot be registered for another contract.`)
 }
 
+let addressesByContractNameCount = (addressesByContractName: dict<array<Address.t>>) => {
+  let numAddresses = ref(0)
+  let contractNames = addressesByContractName->Js.Dict.keys
+  for idx in 0 to contractNames->Array.length - 1 {
+    let contractName = contractNames->Js.Array2.unsafe_get(idx)
+    numAddresses :=
+      numAddresses.contents + addressesByContractName->Js.Dict.unsafeGet(contractName)->Array.length
+  }
+  numAddresses.contents
+}
+
+let addressesByContractNameGetAll = (addressesByContractName: dict<array<Address.t>>) => {
+  let all = ref([])
+  let contractNames = addressesByContractName->Js.Dict.keys
+  for idx in 0 to contractNames->Array.length - 1 {
+    let contractName = contractNames->Js.Array2.unsafe_get(idx)
+    all := all.contents->Array.concat(addressesByContractName->Js.Dict.unsafeGet(contractName))
+  }
+  all.contents
+}
+
+/**
+Creates partitions from indexing addresses with two phases:
+Phase 1: Create per-contract-name partitions (smart grouping by startBlock)
+Phase 2: Merge non-dynamic partitions together to reduce unnecessary concurrency
+Returns OptimizedPartitions.t directly.
+(Dynamic partitions are merged by OptimizedPartitions.make automatically)
+*/
+let createPartitionsFromIndexingAddresses = (
+  ~registeringContractsByContract: dict<dict<Internal.indexingContract>>,
+  ~contractConfigs: dict<contractConfig>,
+  ~dynamicContracts: Utils.Set.t<string>,
+  ~normalSelection: selection,
+  ~maxAddrInPartition: int,
+  ~nextPartitionIndex: int,
+  ~existingPartitions: array<partition>,
+  ~progressBlockNumber: int,
+): // Floor for latestFetchedBlock (use progressBlockNumber from make, or 0 for registerDynamicContracts)
+OptimizedPartitions.t => {
+  let nextPartitionIndexRef = ref(nextPartitionIndex)
+
+  // ── Phase 1: Create per-contract-name partitions ──
+  let dynamicPartitions = []
+  let nonDynamicPartitions = []
+
+  let contractNames = registeringContractsByContract->Js.Dict.keys
+  for cIdx in 0 to contractNames->Js.Array2.length - 1 {
+    let contractName = contractNames->Js.Array2.unsafe_get(cIdx)
+    let registeringContracts = registeringContractsByContract->Js.Dict.unsafeGet(contractName)
+    let addresses =
+      registeringContracts->Js.Dict.keys->(Utils.magic: array<string> => array<Address.t>)
+
+    // Can unsafely get it, because we already filtered out the contracts
+    // that don't have any events to fetch
+    let contractConfig = contractConfigs->Js.Dict.unsafeGet(contractName)
+    let isDynamic = dynamicContracts->Utils.Set.has(contractName)
+    let partitions = isDynamic ? dynamicPartitions : nonDynamicPartitions
+
+    let byStartBlock = Js.Dict.empty()
+    for jdx in 0 to addresses->Array.length - 1 {
+      let address = addresses->Js.Array2.unsafe_get(jdx)
+      let indexingContract = registeringContracts->Js.Dict.unsafeGet(address->Address.toString)
+      byStartBlock->Utils.Dict.push(indexingContract.startBlock->Int.toString, address)
+    }
+
+    // Will be in ASC order by JS spec
+    let ascKeys = byStartBlock->Js.Dict.keys
+    let initialKey = ascKeys->Utils.Array.firstUnsafe
+
+    let startBlockRef = ref(initialKey->Int.fromString->Option.getUnsafe)
+    let addressesRef = ref(byStartBlock->Js.Dict.unsafeGet(initialKey))
+
+    for idx in 0 to ascKeys->Js.Array2.length - 1 {
+      let maybeNextStartBlockKey =
+        ascKeys->Js.Array2.unsafe_get(idx + 1)->(Utils.magic: string => option<string>)
+
+      // For this case we can't filter out events earlier than contract registration
+      // on the client side, so we need to keep the old logic of creating
+      // a partition for every block range, so there are no irrelevant events
+      let shouldAllocateNewPartition = if contractConfig.filterByAddresses {
+        true
+      } else {
+        switch maybeNextStartBlockKey {
+        | None => true
+        | Some(nextStartBlockKey) => {
+            let nextStartBlock = nextStartBlockKey->Int.fromString->Option.getUnsafe
+            let shouldJoinCurrentStartBlock =
+              nextStartBlock - startBlockRef.contents < OptimizedPartitions.tooFarBlockRange
+
+            // If dynamic contract registration are close to eachother
+            // and it's possible to use dc.startBlock to filter out events on client side
+            // then we can optimize the number of partitions,
+            // by putting dcs with different startBlocks in the same partition
+            if shouldJoinCurrentStartBlock {
+              addressesRef :=
+                addressesRef.contents->Array.concat(
+                  byStartBlock->Js.Dict.unsafeGet(nextStartBlockKey),
+                )
+              false
+            } else {
+              true
+            }
+          }
+        }
+      }
+
+      if shouldAllocateNewPartition {
+        let latestFetchedBlock = {
+          blockNumber: Pervasives.max(startBlockRef.contents - 1, progressBlockNumber),
+          blockTimestamp: 0,
+        }
+        while addressesRef.contents->Array.length > 0 {
+          let pAddresses =
+            addressesRef.contents->Js.Array2.slice(~start=0, ~end_=maxAddrInPartition)
+          addressesRef.contents = addressesRef.contents->Js.Array2.sliceFrom(maxAddrInPartition)
+
+          let addressesByContractName = Js.Dict.empty()
+          addressesByContractName->Js.Dict.set(contractName, pAddresses)
+          partitions->Array.push({
+            id: nextPartitionIndexRef.contents->Int.toString,
+            latestFetchedBlock,
+            selection: normalSelection,
+            dynamicContract: isDynamic ? Some(contractName) : None,
+            addressesByContractName,
+            mergeBlock: None,
+            mutPendingQueries: [],
+            prevQueryRange: 0,
+            prevPrevQueryRange: 0,
+            latestBlockRangeUpdateBlock: 0,
+          })
+          nextPartitionIndexRef := nextPartitionIndexRef.contents + 1
+        }
+
+        switch maybeNextStartBlockKey {
+        | None => ()
+        | Some(nextStartBlockKey) => {
+            startBlockRef := nextStartBlockKey->Int.fromString->Option.getUnsafe
+            addressesRef := byStartBlock->Js.Dict.unsafeGet(nextStartBlockKey)
+          }
+        }
+      }
+    }
+  }
+
+  // ── Phase 2: Merge non-dynamic partitions ──
+  let mergedNonDynamic = []
+
+  if nonDynamicPartitions->Array.length > 0 {
+    // Sort non-dynamic partitions by latestFetchedBlock ascending
+    let _ = nonDynamicPartitions->Js.Array2.sortInPlaceWith(OptimizedPartitions.ascSortFn)
+
+    let currentPRef = ref(nonDynamicPartitions->Js.Array2.unsafe_get(0))
+    let nextIdx = ref(1)
+
+    while nextIdx.contents < nonDynamicPartitions->Array.length {
+      let nextP = nonDynamicPartitions->Js.Array2.unsafe_get(nextIdx.contents)
+      let currentP = currentPRef.contents
+      let currentPBlock = currentP.latestFetchedBlock.blockNumber
+      let nextPBlock = nextP.latestFetchedBlock.blockNumber
+
+      // Compute total count WITHOUT mutating any arrays
+      let totalCount =
+        currentP.addressesByContractName->addressesByContractNameCount +
+        nextP.addressesByContractName->addressesByContractNameCount
+
+      if totalCount > maxAddrInPartition {
+        // Exceeds address limit - don't merge, keep partitions separate
+        mergedNonDynamic->Js.Array2.push(currentP)->ignore
+        currentPRef := nextP
+      } else {
+        // Build merged addresses using Array.concat (non-mutating)
+        let mergedAddresses = nextP.addressesByContractName->Utils.Dict.shallowCopy
+        let currentContractNames = currentP.addressesByContractName->Js.Dict.keys
+        for jdx in 0 to currentContractNames->Js.Array2.length - 1 {
+          let cn = currentContractNames->Js.Array2.unsafe_get(jdx)
+          let currentAddrs = currentP.addressesByContractName->Js.Dict.unsafeGet(cn)
+          switch mergedAddresses->Utils.Dict.dangerouslyGetNonOption(cn) {
+          | Some(existingAddrs) =>
+            // Use concat (non-mutating) to avoid corrupting nextP's arrays
+            mergedAddresses->Js.Dict.set(cn, existingAddrs->Array.concat(currentAddrs))
+          | None => mergedAddresses->Js.Dict.set(cn, currentAddrs)
+          }
+        }
+
+        let nextContractName = nextP.addressesByContractName->Js.Dict.keys->Utils.Array.firstUnsafe
+        let hasFilterByAddresses = (
+          contractConfigs->Js.Dict.unsafeGet(nextContractName)
+        ).filterByAddresses
+        let isTooFar = currentPBlock + OptimizedPartitions.tooFarBlockRange < nextPBlock
+
+        if isTooFar || hasFilterByAddresses {
+          // Too far or address-filtered: mergeBlock on current, merge addresses into next
+          mergedNonDynamic
+          ->Js.Array2.push({
+            ...currentP,
+            mergeBlock: currentPBlock < nextPBlock ? Some(nextPBlock) : None,
+          })
+          ->ignore
+          currentPRef := {
+            ...nextP,
+            addressesByContractName: mergedAddresses,
+          }
+        } else {
+          // Close and not address-filtered: push next's addresses into current
+          currentPRef := {
+            ...currentP,
+            addressesByContractName: mergedAddresses,
+          }
+        }
+      }
+
+      nextIdx := nextIdx.contents + 1
+    }
+
+    mergedNonDynamic->Js.Array2.push(currentPRef.contents)->ignore
+  }
+
+  let mergedPartitions = mergedNonDynamic->Js.Array2.concat(dynamicPartitions)
+
+  // Final step: concat existing partitions with phase 1+2 result and call OptimizedPartitions.make
+  OptimizedPartitions.make(
+    ~partitions=existingPartitions->Js.Array2.concat(mergedPartitions),
+    ~maxAddrInPartition,
+    ~nextPartitionIndex=nextPartitionIndexRef.contents,
+    ~dynamicContracts,
+  )
+}
+
 let registerDynamicContracts = (
   fetchState: t,
   // These are raw items which might have dynamic contracts received from contractRegister call.
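The phase-1 grouping above bundles registrations whose startBlocks fall within tooFarBlockRange of the group's start; a larger gap (or a filterByAddresses contract) starts a new partition. A standalone ReScript sketch of just that grouping rule (assumes a non-empty, ascending input; illustrative, not the package's API):

    let tooFarBlockRange = 20_000

    // Group ascending start blocks: stay in the current group while the gap
    // from the group's first block is below the threshold.
    let groupStartBlocks = (ascStartBlocks: array<int>) => {
      let groups = []
      let current = ref([])
      let groupStart = ref(ascStartBlocks->Belt.Array.get(0)->Belt.Option.getWithDefault(0))
      ascStartBlocks->Js.Array2.forEach(b => {
        if b - groupStart.contents < tooFarBlockRange {
          current.contents->Js.Array2.push(b)->ignore
        } else {
          groups->Js.Array2.push(current.contents)->ignore
          current := [b]
          groupStart := b
        }
      })
      groups->Js.Array2.push(current.contents)->ignore
      groups
    }

    // groupStartBlocks([100, 5_000, 90_000]) == [[100, 5_000], [90_000]]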
@@ -333,16 +931,13 @@ let registerDynamicContracts = (
 ) => {
   if fetchState.normalSelection.eventConfigs->Utils.Array.isEmpty {
     // Can the normalSelection be empty?
-    // Probably only on pre-registration, but we don't
-    // register dynamic contracts during it
     Js.Exn.raiseError(
       "Invalid configuration. No events to fetch for the dynamic contract registration.",
     )
   }
 
   let indexingContracts = fetchState.indexingContracts
-  let
-  let addressesByContractName = Js.Dict.empty()
+  let registeringContractsByContract: dict<dict<Internal.indexingContract>> = Js.Dict.empty()
   let earliestRegisteringEventBlockNumber = ref(%raw(`Infinity`))
   let hasDCWithFilterByAddresses = ref(false)
 
@@ -383,6 +978,8 @@ let registerDynamicContracts = (
       }
       shouldRemove := true
     | None =>
+      let registeringContracts =
+        registeringContractsByContract->Utils.Dict.getOrInsertEmptyDict(dc.contractName)
      let shouldUpdate = switch registeringContracts->Utils.Dict.dangerouslyGetNonOption(
        dc.address->Address.toString,
      ) {
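The hunk above routes each dynamic contract's registrations through Utils.Dict.getOrInsertEmptyDict. The helper's implementation isn't shown in this diff; a plausible ReScript sketch of its shape (an assumption, not the package's code):

    // Return the nested dict for a key, creating and storing an empty one
    // on first access.
    let getOrInsertEmptyDict = (outer: dict<dict<'a>>, key: string): dict<'a> =>
      switch outer->Js.Dict.get(key) {
      | Some(inner) => inner
      | None => {
          let inner = Js.Dict.empty()
          outer->Js.Dict.set(key, inner)
          inner
        }
      }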
@@ -395,7 +992,6 @@ let registerDynamicContracts = (
         false
       | None =>
         hasDCWithFilterByAddresses := hasDCWithFilterByAddresses.contents || filterByAddresses
-        addressesByContractName->Utils.Dict.push(dc.contractName, dc.address)
         true
       }
       if shouldUpdate {
@@ -430,270 +1026,139 @@ let registerDynamicContracts = (
     }
   }
 
-  let
-  switch
+  let dcContractNamesToStore = registeringContractsByContract->Js.Dict.keys
+  switch dcContractNamesToStore {
   // Dont update anything when everything was filter out
   | [] => fetchState
   | _ => {
-      let newPartitions =
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-      }
-      latestFetchedBlock: {
-        blockNumber: Pervasives.max(startBlockKey->Int.fromString->Option.getExn - 1, 0),
-        blockTimestamp: 0,
-      },
-      selection: fetchState.normalSelection,
-      addressesByContractName,
-      })
-    })
-  } else {
-    // The goal is to try to split partitions the way,
-    // so there are mostly addresses of the same contract in each partition
-    // TODO: Should do the same for the initial FetchState creation
-    for jdx in 0 to addresses->Array.length - 1 {
-      let address = addresses->Js.Array2.unsafe_get(jdx)
-      if pendingCount.contents === fetchState.maxAddrInPartition {
-        addPartition()
-        pendingAddressesByContractName := Js.Dict.empty()
-        pendingCount := 0
-        earliestRegisteringEventBlockNumber := %raw(`Infinity`)
+      let newPartitions = []
+      let newIndexingContracts = indexingContracts->Utils.Dict.shallowCopy
+      let dynamicContractsRef = ref(fetchState.optimizedPartitions.dynamicContracts)
+      let mutExistingPartitions = fetchState.optimizedPartitions.entities->Js.Dict.values
+
+      for idx in 0 to dcContractNamesToStore->Js.Array2.length - 1 {
+        let contractName = dcContractNamesToStore->Js.Array2.unsafe_get(idx)
+
+        // When a new contract name is added as a dynamic contract for the first time (not in dynamicContracts set):
+        // Walks through existing partitions that have addresses for this contract name
+        // - If partition has ONLY this contract's addresses -> sets dynamicContract field
+        // - If partition has this contract's addresses AND other contracts -> splits them
+        // For the sake of merging simplicity we want to make sure that
+        // partition has addresses of only one contract
+        if !(dynamicContractsRef.contents->Utils.Set.has(contractName)) {
+          dynamicContractsRef := dynamicContractsRef.contents->Utils.Set.immutableAdd(contractName)
+
+          for idx in 0 to mutExistingPartitions->Js.Array2.length - 1 {
+            let p = mutExistingPartitions->Js.Array2.unsafe_get(idx)
+            switch p.addressesByContractName->Utils.Dict.dangerouslyGetNonOption(contractName) {
+            | None => () // Skip partitions which don't have our contract
+            | Some(addresses) =>
+              // Also filter out partitions which are 100% not mergable
+              if p.selection.dependsOnAddresses && p.mergeBlock === None {
+                let allPartitionContractNames = p.addressesByContractName->Js.Dict.keys
+                switch allPartitionContractNames {
+                | [_] =>
+                  mutExistingPartitions->Js.Array2.unsafe_set(
+                    idx,
+                    // Even if it's fetching, set dynamicContract field
+                    {
+                      ...p,
+                      dynamicContract: Some(contractName),
+                    },
+                  )
+                | _ => {
+                    let isFetching = p.mutPendingQueries->Array.length > 0
+                    if isFetching {
+                      // The partition won't be split and won't get a dynamicContract field
+                      // This won't allow to optimize the partitions to the potential max
+                      // Not super critical - at least we won't have a burden of
+                      // splitting a fetching partition and then handing the response
+                      ()
+                    } else {
+                      let newPartitionId =
+                        (fetchState.optimizedPartitions.nextPartitionIndex +
+                        newPartitions->Array.length)->Int.toString
+
+                      let restAddressesByContractName =
+                        p.addressesByContractName->Utils.Dict.shallowCopy
+                      restAddressesByContractName->Utils.Dict.deleteInPlace(contractName)
+
+                      mutExistingPartitions->Js.Array2.unsafe_set(
+                        idx,
+                        {
+                          ...p,
+                          addressesByContractName: restAddressesByContractName,
+                        },
+                      )
+
+                      let addressesByContractName = Js.Dict.empty()
+                      addressesByContractName->Js.Dict.set(contractName, addresses)
+                      newPartitions->Array.push({
+                        id: newPartitionId,
+                        latestFetchedBlock: p.latestFetchedBlock,
+                        selection: fetchState.normalSelection,
+                        dynamicContract: Some(contractName),
+                        addressesByContractName,
+                        mergeBlock: None,
+                        mutPendingQueries: p.mutPendingQueries,
+                        prevQueryRange: p.prevQueryRange,
+                        prevPrevQueryRange: p.prevPrevQueryRange,
+                        latestBlockRangeUpdateBlock: p.latestBlockRangeUpdateBlock,
+                      })
+                    }
+                  }
+                }
               }
-
-          let indexingContract =
-            registeringContracts->Js.Dict.unsafeGet(address->Address.toString)
-
-          pendingCount := pendingCount.contents + 1
-          pendingAddressesByContractName.contents->Utils.Dict.push(contractName, address)
-          earliestRegisteringEventBlockNumber :=
-            Pervasives.min(
-              earliestRegisteringEventBlockNumber.contents,
-              indexingContract.startBlock,
-            )
             }
           }
         }
 
-
-
-}
-
-  partitions
+        let registeringContracts = registeringContractsByContract->Js.Dict.unsafeGet(contractName)
+        let _ = Utils.Dict.mergeInPlace(newIndexingContracts, registeringContracts)
       }
 
-
-  ~
-  ~
+      let optimizedPartitions = createPartitionsFromIndexingAddresses(
+        ~registeringContractsByContract,
+        ~contractConfigs=fetchState.contractConfigs,
+        ~dynamicContracts=dynamicContractsRef.contents,
+        ~normalSelection=fetchState.normalSelection,
+        ~maxAddrInPartition=fetchState.optimizedPartitions.maxAddrInPartition,
|
|
1126
|
+
~nextPartitionIndex=fetchState.optimizedPartitions.nextPartitionIndex +
|
|
1127
|
+
newPartitions->Array.length,
|
|
1128
|
+
~existingPartitions=mutExistingPartitions->Js.Array2.concat(newPartitions),
|
|
1129
|
+
~progressBlockNumber=0,
|
|
560
1130
|
)
|
|
561
1131
|
|
|
562
|
-
fetchState->updateInternal(
|
|
563
|
-
~partitions=fetchState.partitions->Js.Array2.concat(newPartitions),
|
|
564
|
-
~indexingContracts=// We don't need registeringContracts anymore,
|
|
565
|
-
// so we can safely mixin indexingContracts in it
|
|
566
|
-
// The original indexingContracts won't be mutated
|
|
567
|
-
Utils.Dict.mergeInPlace(registeringContracts, indexingContracts),
|
|
568
|
-
~nextPartitionIndex=fetchState.nextPartitionIndex + newPartitions->Array.length,
|
|
569
|
-
)
|
|
1132
|
+
fetchState->updateInternal(~optimizedPartitions, ~indexingContracts=newIndexingContracts)
|
|
570
1133
|
}
|
|
571
1134
|
}
|
|
572
1135
|
}
|
|
573
1136
|
|
|
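The splitting rule in the added block above (one contract per dynamic-contract partition) is easier to see in isolation. A minimal TypeScript sketch of the same move, with an illustrative `Partition` shape and a hypothetical `splitOutContract` helper that are not part of the package's API:

    type Partition = {
      id: string;
      // contract name -> addresses indexed by this partition
      addressesByContractName: Record<string, string[]>;
      dynamicContract?: string;
    };

    // Split `contractName` out of a mixed partition so that the new
    // partition holds only that contract's addresses, mirroring the
    // "one contract per dynamic partition" invariant described above.
    function splitOutContract(
      p: Partition,
      contractName: string,
      newId: string,
    ): { rest: Partition; split: Partition } | null {
      const addresses = p.addressesByContractName[contractName];
      if (!addresses) return null; // partition doesn't index this contract
      const restAddresses = { ...p.addressesByContractName };
      delete restAddresses[contractName];
      return {
        rest: { ...p, addressesByContractName: restAddresses },
        split: {
          id: newId,
          addressesByContractName: { [contractName]: addresses },
          dynamicContract: contractName,
        },
      };
    }

The partition that keeps the remaining contracts only loses one dictionary key, which is what keeps later merges simple.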
- type queryTarget =
-   | Head
-   | EndBlock({toBlock: int})
-   | Merge({
-       // The partition we are going to merge into
-       // It shouldn't be fetching during the query
-       intoPartitionId: string,
-       toBlock: int,
-     })
-
- type query = {
-   partitionId: string,
-   fromBlock: int,
-   selection: selection,
-   addressesByContractName: dict<array<Address.t>>,
-   target: queryTarget,
-   indexingContracts: dict<Internal.indexingContract>,
- }
-
- exception UnexpectedPartitionNotFound({partitionId: string})
- exception UnexpectedMergeQueryResponse({message: string})
-
  /*
  Updates fetchState with a response for a given query.
  Returns Error if the partition with given query cannot be found (unexpected)
- If MergeQuery caught up to the target partition, it triggers the merge of the partitions.

  newItems are ordered earliest to latest (as they are returned from the worker)
  */
  let handleQueryResult = (
- … (1 removed line not preserved in this rendering)
+   fetchState: t,
    ~query: query,
    ~latestFetchedBlock: blockNumberAndTimestamp,
    ~newItems,
- ): …
- … (10 removed lines not preserved in this rendering)
-     },
-     latestFetchedBlock,
-   }
-
-   switch query.target {
-   | Head
-   | EndBlock(_) =>
-     Ok(partitions->Utils.Array.setIndexImmutable(pIndex, updatedPartition))
-   | Merge({intoPartitionId}) =>
-     switch partitions->Array.getIndexBy(p => p.id === intoPartitionId) {
-     | Some(targetIndex)
-       if (partitions->Js.Array2.unsafe_get(targetIndex)).latestFetchedBlock.blockNumber ===
-       latestFetchedBlock.blockNumber => {
-         let target = partitions->Js.Array2.unsafe_get(targetIndex)
-         let (merged, rest) =
-           updatedPartition->mergeIntoPartition(
-             ~target,
-             ~maxAddrInPartition=fetchState.maxAddrInPartition,
-           )
-
-         let updatedPartitions = partitions->Utils.Array.setIndexImmutable(targetIndex, merged)
-         let updatedPartitions = switch rest {
-         | Some(rest) => {
-             updatedPartitions->Js.Array2.unsafe_set(pIndex, rest)
-             updatedPartitions
-           }
-         | None => updatedPartitions->Utils.Array.removeAtIndex(pIndex)
-         }
-         Ok(updatedPartitions)
-       }
-     | _ => Ok(partitions->Utils.Array.setIndexImmutable(pIndex, updatedPartition))
-     }
+ ): t => {
+   fetchState->updateInternal(
+     ~optimizedPartitions=fetchState.optimizedPartitions->OptimizedPartitions.handleQueryResponse(
+       ~query,
+       ~knownHeight=fetchState.knownHeight,
+       ~latestFetchedBlock,
+     ),
+     ~mutItems=?{
+       switch newItems {
+       | [] => None
+       | _ => Some(fetchState.buffer->Array.concat(newItems))
        }
-
-
-     UnexpectedPartitionNotFound({
-       partitionId: partitionId,
-     }),
-   )
-   }
- }->Result.map(partitions => {
-   fetchState->updateInternal(
-     ~partitions,
-     ~mutItems=?{
-       switch newItems {
-       | [] => None
-       | _ => Some(fetchState.buffer->Array.concat(newItems))
-       }
-     },
-   )
- })
-
- let makePartitionQuery = (p: partition, ~indexingContracts, ~endBlock, ~mergeTarget) => {
-   let fromBlock = switch p.latestFetchedBlock.blockNumber {
-   | 0 => 0
-   | latestFetchedBlockNumber => latestFetchedBlockNumber + 1
-   }
-   switch (endBlock, mergeTarget) {
-   | (Some(endBlock), _) if fromBlock > endBlock => None
-   | (_, Some(mergeTarget)) =>
-     Some(
-       Merge({
-         toBlock: mergeTarget.latestFetchedBlock.blockNumber,
-         intoPartitionId: mergeTarget.id,
-       }),
-     )
-   | (Some(endBlock), None) => Some(EndBlock({toBlock: endBlock}))
-   | (None, None) => Some(Head)
-   }->Option.map(target => {
-     {
-       partitionId: p.id,
-       fromBlock,
-       target,
-       selection: p.selection,
-       addressesByContractName: p.addressesByContractName,
-       indexingContracts,
-     }
-   })
+     },
+   )
  }

  type nextQuery =
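Taken together, the removed and added lines change how a query's stop point is modelled: the `queryTarget` variant (Head / EndBlock / Merge) is gone, and later hunks construct queries with an optional `toBlock` plus an `isChunk` flag instead. An approximate TypeScript rendering of the before/after shapes, inferred from this diff rather than copied from the package's type definitions:

    // Old: a target variant encoded where a query should stop.
    type QueryTargetOld =
      | { kind: "Head" }
      | { kind: "EndBlock"; toBlock: number }
      | { kind: "Merge"; intoPartitionId: string; toBlock: number };

    // New (approximate): the stop block is just an optional field,
    // and chunked range queries are marked explicitly.
    type QueryNew = {
      partitionId: string;
      fromBlock: number;
      toBlock?: number; // undefined = fetch up to the head
      isChunk: boolean;
    };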
@@ -702,200 +1167,235 @@ type nextQuery =
  | NothingToQuery
  | Ready(array<query>)

- let startFetchingQueries = ({ …
-   queries->Array. …
-   … (2 removed lines not preserved in this rendering)
-   // The status will be immutably set to the initial one when we handle response
-   | Some(p) => p.status.fetchingStateId = Some(stateId)
-   | None => Js.Exn.raiseError("Unexpected case: Couldn't find partition for the fetching query")
-   }
-   })
- }
+ let startFetchingQueries = ({optimizedPartitions}: t, ~queries: array<query>) => {
+   for qIdx in 0 to queries->Array.length - 1 {
+     let q = queries->Js.Array2.unsafe_get(qIdx)
+     let p = optimizedPartitions->OptimizedPartitions.getOrThrow(~partitionId=q.partitionId)

- let …
- … (5 removed lines not preserved in this rendering)
-   numAddresses.contents + addressesByContractName->Js.Dict.unsafeGet(contractName)->Array.length
- }
- numAddresses.contents
- }
+     let pq = {
+       fromBlock: q.fromBlock,
+       toBlock: q.toBlock,
+       isChunk: q.isChunk,
+       fetchedBlock: None,
+     }

- … (6 removed lines not preserved in this rendering)
+     // Insert in sorted order by fromBlock to maintain queue invariant.
+     // Gap-fill queries may have lower fromBlock than existing pending queries.
+     let inserted = ref(false)
+     let i = ref(0)
+     while i.contents < p.mutPendingQueries->Array.length && !inserted.contents {
+       if (p.mutPendingQueries->Js.Array2.unsafe_get(i.contents)).fromBlock > q.fromBlock {
+         p.mutPendingQueries->Js.Array2.spliceInPlace(~pos=i.contents, ~remove=0, ~add=[pq])->ignore
+         inserted := true
+       }
+       i := i.contents + 1
+     }
+     if !inserted.contents {
+       p.mutPendingQueries->Array.push(pq)->ignore
+     }
    }
- all.contents
  }
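The insertion loop above keeps `mutPendingQueries` ordered by `fromBlock` even when a gap-fill query starts below existing entries. The same loop as a standalone TypeScript sketch (the `PendingQuery` type here is a stand-in):

    type PendingQuery = { fromBlock: number; toBlock?: number };

    // Insert pq keeping `pending` sorted ascending by fromBlock.
    // A plain push would break the ordering invariant whenever a
    // gap-fill query starts below existing entries.
    function insertSorted(pending: PendingQuery[], pq: PendingQuery): void {
      for (let i = 0; i < pending.length; i++) {
        if (pending[i].fromBlock > pq.fromBlock) {
          pending.splice(i, 0, pq); // splice mirrors Js.Array2.spliceInPlace
          return;
        }
      }
      pending.push(pq); // belongs at the end
    }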

  @inline
- let …
- … (3 removed lines not preserved in this rendering)
+ let pushQueriesForRange = (
+   queries: array<query>,
+   ~partitionId: string,
+   ~rangeFromBlock: int,
+   ~rangeEndBlock: option<int>,
+   ~maxQueryBlockNumber: int,
+   ~maybeChunkRange: option<int>,
+   ~selection: selection,
+   ~addressesByContractName: dict<array<Address.t>>,
+   ~indexingContracts: dict<Internal.indexingContract>,
+ ) => {
+   if rangeFromBlock <= maxQueryBlockNumber {
+     switch rangeEndBlock {
+     | Some(endBlock) if rangeFromBlock > endBlock => ()
+     | _ =>
+       switch maybeChunkRange {
+       | None =>
+         queries->Array.push({
+           partitionId,
+           fromBlock: rangeFromBlock,
+           toBlock: rangeEndBlock,
+           selection,
+           isChunk: false,
+           addressesByContractName,
+           indexingContracts,
+         })
+       | Some(chunkRange) =>
+         let maxBlock = switch rangeEndBlock {
+         | Some(eb) => eb
+         | None => maxQueryBlockNumber
+         }
+         let chunkSize = Js.Math.ceil_int(chunkRange->Int.toFloat *. 1.8)
+         if rangeFromBlock + 2 * chunkSize - 1 <= maxBlock {
+           // Create 2 chunks of ceil(1.8 * chunkRange) each
+           queries->Array.push({
+             partitionId,
+             fromBlock: rangeFromBlock,
+             toBlock: Some(rangeFromBlock + chunkSize - 1),
+             isChunk: true,
+             selection,
+             addressesByContractName,
+             indexingContracts,
+           })
+           queries->Array.push({
+             partitionId,
+             fromBlock: rangeFromBlock + chunkSize,
+             toBlock: Some(rangeFromBlock + 2 * chunkSize - 1),
+             isChunk: true,
+             selection,
+             addressesByContractName,
+             indexingContracts,
+           })
+         } else {
+           // Not enough room for 2 chunks, fall back to a single query
+           queries->Array.push({
+             partitionId,
+             fromBlock: rangeFromBlock,
+             toBlock: rangeEndBlock,
+             selection,
+             isChunk: rangeEndBlock !== None,
+             addressesByContractName,
+             indexingContracts,
+           })
+         }
+       }
+     }
  }
  }

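The chunking branch grows the probe size geometrically: each chunk is ceil(1.8 × chunkRange) blocks, and two chunks are only emitted when both fit before the maximum block; otherwise it falls back to a single query. For example, with chunkRange = 1000, chunkSize = 1800, so a range starting at block 10000 needs maxBlock ≥ 13599 to split. A TypeScript sketch of just the boundary arithmetic (an illustration of the rule, not the package's code):

    // Returns the [from, to] pairs the range walker would emit,
    // following the 1.8x chunk-growth rule visible in the diff.
    function chunkBounds(
      fromBlock: number,
      maxBlock: number,
      chunkRange: number,
    ): Array<[number, number]> {
      const chunkSize = Math.ceil(chunkRange * 1.8);
      if (fromBlock + 2 * chunkSize - 1 <= maxBlock) {
        return [
          [fromBlock, fromBlock + chunkSize - 1],
          [fromBlock + chunkSize, fromBlock + 2 * chunkSize - 1],
        ];
      }
      // Not enough room for two chunks: one query covers the rest.
      return [[fromBlock, maxBlock]];
    }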
  let getNextQuery = (
    {
      buffer,
-
+     optimizedPartitions,
      targetBufferSize,
-     maxAddrInPartition,
-     endBlock,
      indexingContracts,
      blockLag,
-
+     latestOnBlockBlockNumber,
+     knownHeight,
+   } as fetchState: t,
    ~concurrencyLimit,
-   ~currentBlockHeight,
-   ~stateId,
  ) => {
-   let …
-   if …
+   let headBlockNumber = knownHeight - blockLag
+   if headBlockNumber <= 0 {
      WaitingForNewBlock
    } else if concurrencyLimit === 0 {
      ReachedMaxConcurrency
    } else {
-     let …
-     let mergingPartitions = []
-     let areMergingPartitionsFetching = ref(false)
-     let mostBehindMergingPartition = ref(None)
-     let mergingPartitionTarget = ref(None)
+     let isOnBlockBehindTheHead = latestOnBlockBlockNumber < headBlockNumber
      let shouldWaitForNewBlock = ref(
-       switch endBlock {
-       | Some(endBlock) =>
+       switch fetchState.endBlock {
+       | Some(endBlock) => headBlockNumber < endBlock
        | None => true
-       }
+       } &&
+       !isOnBlockBehindTheHead,
      )

-     … (4 removed lines not preserved in this rendering)
+     // We want to limit the buffer size to targetBufferSize (usually 3 * batchSize)
+     // To make sure the processing always has some buffer
+     // and not increase the memory usage too much
+     // If a partition fetched further
+     // it should be skipped until the buffer is consumed
+     let maxQueryBlockNumber = {
+       switch buffer->Array.get(targetBufferSize - 1) {
+       | Some(item) =>
+         // Just in case check that we don't query beyond the current block
+         Pervasives.min(item->Internal.getItemBlockNumber, knownHeight)
+       | None => knownHeight
        }
      }

-
-     let p = partitions->Js.Array2.unsafe_get(idx)
+     let queries = []

-     … (2 removed lines not preserved in this rendering)
+     let partitionsCount = optimizedPartitions.idsInAscOrder->Js.Array2.length
+     let idxRef = ref(0)
+     while idxRef.contents < partitionsCount {
+       let idx = idxRef.contents
+       let partitionId = optimizedPartitions.idsInAscOrder->Js.Array2.unsafe_get(idx)
+       let p = optimizedPartitions.entities->Js.Dict.unsafeGet(partitionId)

-
+       let isBehindTheHead = p.latestFetchedBlock.blockNumber < headBlockNumber
+       let hasPendingQueries = p.mutPendingQueries->Utils.Array.notEmpty
+
+       if hasPendingQueries || isBehindTheHead {
        // Even if there are some partitions waiting for the new block
        // We still want to wait for all partitions reaching the head
-       // because they might update
+       // because they might update knownHeight in their response
        // Also, there are cases when some partitions fetching at 50% of the chain
        // and we don't want to poll the head for a few small partitions
        shouldWaitForNewBlock := false
      }

-     … (8 removed lines not preserved in this rendering)
-     if (
-       // The = check is important here. We don't want to have a target
-       // with the same latestFetchedBlock. They should be merged in separate queries
-       mostBehindMergingPartition.latestFetchedBlock.blockNumber ===
-       p.latestFetchedBlock.blockNumber
-     ) {
-       mostBehindMergingPartition
-     } else if (
-       mostBehindMergingPartition.latestFetchedBlock.blockNumber <
-       p.latestFetchedBlock.blockNumber
-     ) {
-       mergingPartitionTarget :=
-         switch mergingPartitionTarget.contents {
-         | Some(mergingPartitionTarget)
-           if mergingPartitionTarget.latestFetchedBlock.blockNumber <
-           p.latestFetchedBlock.blockNumber => mergingPartitionTarget
-         | _ => p
-         }->Some
-       mostBehindMergingPartition
-     } else {
-       mergingPartitionTarget := Some(mostBehindMergingPartition)
-       p
-     }
-     | None => p
-     }->Some
-
-     if isFetching {
-       areMergingPartitionsFetching := true
-     }
+       // Compute queryEndBlock for this partition
+       let queryEndBlock = Utils.Math.minOptInt(fetchState.endBlock, p.mergeBlock)
+       let queryEndBlock = switch blockLag {
+       | 0 => queryEndBlock
+       | _ =>
+         // Force head block as an endBlock when blockLag is set
+         // because otherwise HyperSync might return bigger range
+         Utils.Math.minOptInt(Some(headBlockNumber), queryEndBlock)
      }

-     … (8 removed lines not preserved in this rendering)
-     switch buffer->Array.get(targetBufferSize - 1) {
-     | Some(item) =>
-       // Just in case check that we don't query beyond the current block
-       Pervasives.min(item->Internal.getItemBlockNumber, currentBlockHeight)
-     | None => currentBlockHeight
+       // Enforce the response range up until target block
+       // Otherwise for indexers with 100+ partitions
+       // we might blow up the buffer size to more than 600k events
+       // simply because of HyperSync returning extra blocks
+       let queryEndBlock = switch (queryEndBlock, maxQueryBlockNumber < knownHeight) {
+       | (Some(endBlock), true) => Some(Pervasives.min(maxQueryBlockNumber, endBlock))
+       | (None, true) => Some(maxQueryBlockNumber)
+       | (_, false) => queryEndBlock
      }
-     }
-     let queries = []

-     … (3 removed lines not preserved in this rendering)
-     )
-     … (9 removed lines not preserved in this rendering)
+       let maybeChunkRange = getMinHistoryRange(p)
+
+       // Walk pending queries to find open ranges and create queries for each
+       let cursor = ref(p.latestFetchedBlock.blockNumber + 1)
+       let canContinue = ref(true)
+       let pqIdx = ref(0)
+       while pqIdx.contents < p.mutPendingQueries->Array.length && canContinue.contents {
+         let pq = p.mutPendingQueries->Js.Array2.unsafe_get(pqIdx.contents)
+
+         // Gap before this pending query → create queries for the gap range
+         if pq.fromBlock > cursor.contents {
+           pushQueriesForRange(
+             queries,
+             ~partitionId,
+             ~rangeFromBlock=cursor.contents,
+             ~rangeEndBlock=Utils.Math.minOptInt(Some(pq.fromBlock - 1), queryEndBlock),
+             ~maxQueryBlockNumber,
+             ~maybeChunkRange,
+             ~selection=p.selection,
+             ~addressesByContractName=p.addressesByContractName,
+             ~indexingContracts,
+           )
          }

-       … (6 removed lines not preserved in this rendering)
-       | (None, true) => Some(maxQueryBlockNumber)
-       | (_, false) => endBlock
-       }
-
-       switch p->makePartitionQuery(~indexingContracts, ~endBlock, ~mergeTarget) {
-       | Some(q) => queries->Array.push(q)
-       | None => ()
+         switch pq {
+         | {isChunk: true, toBlock: Some(toBlock), fetchedBlock: Some({blockNumber})}
+           if blockNumber < toBlock =>
+           cursor := blockNumber + 1
+         | {isChunk: true, toBlock: Some(toBlock)} => cursor := toBlock + 1
+         | _ => canContinue := false
          }
+         pqIdx := pqIdx.contents + 1
        }
-     }
-
-     fullPartitions->Array.forEach(p => p->registerPartitionQuery)

-     … (11 removed lines not preserved in this rendering)
+       // Tail range after all pending queries
+       if canContinue.contents {
+         pushQueriesForRange(
+           queries,
+           ~partitionId,
+           ~rangeFromBlock=cursor.contents,
+           ~rangeEndBlock=queryEndBlock,
+           ~maxQueryBlockNumber,
+           ~maybeChunkRange,
+           ~selection=p.selection,
+           ~addressesByContractName=p.addressesByContractName,
+           ~indexingContracts,
+         )
        }
+
+       idxRef := idxRef.contents + 1
      }

      if queries->Utils.Array.isEmpty {

@@ -905,15 +1405,14 @@ let getNextQuery = (
        NothingToQuery
      }
    } else {
-     … (8 removed lines not preserved in this rendering)
-   )
+     // Enforce concurrency limit: sort by fromBlock and take the first concurrencyLimit
+     let queries = if queries->Array.length > concurrencyLimit {
+       queries->Js.Array2.sortInPlaceWith((a, b) => a.fromBlock - b.fromBlock)->ignore
+       queries->Js.Array2.slice(~start=0, ~end_=concurrencyLimit)
+     } else {
+       queries
+     }
+     Ready(queries)
    }
  }
  }

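The per-partition loop in `getNextQuery` walks the sorted pending queries with a cursor, issuing queries for every uncovered gap and for the tail after the last pending query. A compact TypeScript model of the cursor walk, simplified to only report gaps (chunking and end-block capping are left out; the `Pending` type is a stand-in):

    type Pending = {
      fromBlock: number;
      toBlock?: number;
      isChunk: boolean;
      fetchedBlock?: number; // set once a response arrived
    };

    // Yields [from, to?] gaps not covered by pending queries, starting
    // right after the latest fully fetched block.
    function openRanges(
      latestFetchedBlock: number,
      pending: Pending[], // sorted ascending by fromBlock
    ): Array<[number, number | undefined]> {
      const gaps: Array<[number, number | undefined]> = [];
      let cursor = latestFetchedBlock + 1;
      for (const pq of pending) {
        if (pq.fromBlock > cursor) gaps.push([cursor, pq.fromBlock - 1]);
        if (pq.isChunk && pq.toBlock !== undefined) {
          // A partially fetched chunk re-opens from its fetched block.
          cursor =
            pq.fetchedBlock !== undefined && pq.fetchedBlock < pq.toBlock
              ? pq.fetchedBlock + 1
              : pq.toBlock + 1;
        } else {
          return gaps; // open-ended query: nothing can follow it
        }
      }
      gaps.push([cursor, undefined]); // tail up to the head / end block
      return gaps;
    }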
@@ -968,9 +1467,11 @@ let make = (
    ~maxAddrInPartition,
    ~chainId,
    ~targetBufferSize,
+   ~knownHeight,
    ~progressBlockNumber=startBlock - 1,
    ~onBlockConfigs=[],
    ~blockLag=0,
+   ~firstEventBlock=None,
  ): t => {
    let latestFetchedBlock = {
      blockTimestamp: 0,

@@ -1007,15 +1508,18 @@ let make = (
    if notDependingOnAddresses->Array.length > 0 {
      partitions->Array.push({
        id: partitions->Array.length->Int.toString,
-       status: {
-         fetchingStateId: None,
-       },
        latestFetchedBlock,
        selection: {
          dependsOnAddresses: false,
          eventConfigs: notDependingOnAddresses,
        },
        addressesByContractName: Js.Dict.empty(),
+       mergeBlock: None,
+       dynamicContract: None,
+       mutPendingQueries: [],
+       prevQueryRange: 0,
+       prevPrevQueryRange: 0,
+       latestBlockRangeUpdateBlock: 0,
      })
    }

@@ -1024,56 +1528,51 @@ let make = (
      eventConfigs: normalEventConfigs,
    }

+   let registeringContractsByContract: dict<dict<Internal.indexingContract>> = Js.Dict.empty()
+   let dynamicContracts = Utils.Set.make()
+
    switch normalEventConfigs {
    | [] => ()
-   | _ =>
-     … (12 removed lines not preserved in this rendering)
-     let pendingNormalPartition = ref(makePendingNormalPartition())
-
-     contracts->Array.forEach(contract => {
-       let contractName = contract.contractName
-       if contractNamesWithNormalEvents->Utils.Set.has(contractName) {
-         let pendingPartition = pendingNormalPartition.contents
-         pendingPartition.addressesByContractName->Utils.Dict.push(contractName, contract.address)
-         indexingContracts->Js.Dict.set(contract.address->Address.toString, contract)
-         if (
-           pendingPartition.addressesByContractName->addressesByContractNameCount ===
-           maxAddrInPartition
-         ) {
-           // FIXME: should split into separate partitions
-           // depending on the start block
-           partitions->Array.push(pendingPartition)
-           pendingNormalPartition := makePendingNormalPartition()
-         }
+   | _ =>
+     contracts->Array.forEach(contract => {
+       let contractName = contract.contractName
+       if contractNamesWithNormalEvents->Utils.Set.has(contractName) {
+         let registeringContracts =
+           registeringContractsByContract->Utils.Dict.getOrInsertEmptyDict(contractName)
+         registeringContracts->Js.Dict.set(contract.address->Address.toString, contract)
+         indexingContracts->Js.Dict.set(contract.address->Address.toString, contract)
+
+         // Detect dynamic contracts by registrationBlock
+         if contract.registrationBlock !== None {
+           dynamicContracts->Utils.Set.add(contractName)->ignore
        }
-     })
-
-     if pendingNormalPartition.contents.addressesByContractName->addressesByContractNameCount > 0 {
-       partitions->Array.push(pendingNormalPartition.contents)
      }
-   }
+     })
    }

-
+   let optimizedPartitions = createPartitionsFromIndexingAddresses(
+     ~registeringContractsByContract,
+     ~contractConfigs,
+     ~dynamicContracts,
+     ~normalSelection,
+     ~maxAddrInPartition,
+     ~nextPartitionIndex=partitions->Array.length,
+     ~existingPartitions=partitions, // wildcard partition(s) if any
+     ~progressBlockNumber,
+   )
+
+   if optimizedPartitions->OptimizedPartitions.count === 0 && onBlockConfigs->Utils.Array.isEmpty {
      Js.Exn.raiseError(
-       "Invalid configuration: Nothing to fetch. Make sure that you provided at least one contract address to index, or have events with Wildcard mode enabled.",
+       "Invalid configuration: Nothing to fetch. Make sure that you provided at least one contract address to index, or have events with Wildcard mode enabled, or have onBlock handlers.",
    )
  }

  let numAddresses = indexingContracts->Js.Dict.keys->Array.length
  Prometheus.IndexingAddresses.set(~addressesCount=numAddresses, ~chainId)
- Prometheus.IndexingPartitions.set(
+ Prometheus.IndexingPartitions.set(
+   ~partitionsCount=optimizedPartitions->OptimizedPartitions.count,
+   ~chainId,
+ )
  Prometheus.IndexingBufferSize.set(~bufferSize=0, ~chainId)
  Prometheus.IndexingBufferBlockNumber.set(~blockNumber=latestFetchedBlock.blockNumber, ~chainId)
  switch endBlock {
@@ -1082,73 +1581,57 @@ let make = (
  }

  {
-
-   nextPartitionIndex: partitions->Array.length,
+   optimizedPartitions,
    contractConfigs,
-   maxAddrInPartition,
    chainId,
    startBlock,
    endBlock,
-   latestFullyFetchedBlock: latestFetchedBlock,
    latestOnBlockBlockNumber: progressBlockNumber,
    normalSelection,
    indexingContracts,
    blockLag,
    onBlockConfigs,
    targetBufferSize,
+   knownHeight,
    buffer: [],
+   firstEventBlock,
  }
  }

  let bufferSize = ({buffer}: t) => buffer->Array.length

- … (3 removed lines not preserved in this rendering)
- let …
- … (13 removed lines not preserved in this rendering)
-       fetchingStateId: None,
-     },
-   })
- | {addressesByContractName} =>
-   let rollbackedAddressesByContractName = Js.Dict.empty()
-   addressesByContractName->Utils.Dict.forEachWithKey((addresses, contractName) => {
-     let keptAddresses =
-       addresses->Array.keep(address => !(addressesToRemove->Utils.Set.has(address)))
-     if keptAddresses->Array.length > 0 {
-       rollbackedAddressesByContractName->Js.Dict.set(contractName, keptAddresses)
+ let rollbackPendingQueries = (mutPendingQueries: array<pendingQuery>, ~targetBlockNumber) => {
+   // - Remove queries where fromBlock > target
+   // - Cap fetchedBlock at target where fetchedBlock > target
+   let adjusted = []
+   for qIdx in 0 to mutPendingQueries->Array.length - 1 {
+     let pq = mutPendingQueries->Js.Array2.unsafe_get(qIdx)
+     if pq.fromBlock <= targetBlockNumber {
+       switch pq.fetchedBlock {
+       | Some({blockNumber}) if blockNumber > targetBlockNumber =>
+         adjusted
+         ->Js.Array2.push({
+           ...pq,
+           fetchedBlock: Some({blockNumber: targetBlockNumber, blockTimestamp: 0}),
+         })
+         ->ignore
+       | Some(_) => adjusted->Js.Array2.push(pq)->ignore
+       | None => Js.Exn.raiseError("Internal error: Must not have a fetching query during rollback")
        }
-   })
-
-   if rollbackedAddressesByContractName->Js.Dict.keys->Array.length === 0 {
-     None
-   } else {
-     Some({
-       id: p.id,
-       selection: p.selection,
-       status: {
-         fetchingStateId: None,
-       },
-       addressesByContractName: rollbackedAddressesByContractName,
-       latestFetchedBlock,
-     })
    }
  }
+   adjusted
  }

+ /**
+ Rolls back fetch state to the given valid block.
+ Always recreates optimized partitions to avoid duplicate addresses:
+ - Wildcard: only rollback latestFetchedBlock
+ - Non-wildcard with lfb <= target: keep, adjust pending queries and mergeBlock
+ - Non-wildcard with lfb > target: delete, track addresses for recreation
+ */
  let rollback = (fetchState: t, ~targetBlockNumber) => {
+   // Step 1: Build addressesToRemove and surviving indexingContracts
    let addressesToRemove = Utils.Set.make()
    let indexingContracts = Js.Dict.empty()

@@ -1157,25 +1640,113 @@ let rollback = (fetchState: t, ~targetBlockNumber) => {
    ->Array.forEach(address => {
      let indexingContract = fetchState.indexingContracts->Js.Dict.unsafeGet(address)
      switch indexingContract.registrationBlock {
-     | Some(registrationBlock) if registrationBlock > targetBlockNumber =>
-
-       //Do not keep it and add to the removed addresses
-       let _ = addressesToRemove->Utils.Set.add(address->Address.unsafeFromString)
-     }
+     | Some(registrationBlock) if registrationBlock > targetBlockNumber =>
+       let _ = addressesToRemove->Utils.Set.add(address->Address.unsafeFromString)
      | _ => indexingContracts->Js.Dict.set(address, indexingContract)
      }
    })

-   … (4 removed lines not preserved in this rendering)
+   // Step 2: Categorize partitions
+   let keptPartitions = []
+   let nextKeptIdRef = ref(0)
+   let registeringContractsByContract: dict<dict<Internal.indexingContract>> = Js.Dict.empty()
+
+   let partitions = fetchState.optimizedPartitions.entities->Js.Dict.values
+   for idx in 0 to partitions->Array.length - 1 {
+     let p = partitions->Js.Array2.unsafe_get(idx)
+     switch p {
+     // Wildcard: rollback latestFetchedBlock and adjust pending queries
+     | {selection: {dependsOnAddresses: false}} =>
+       let id = nextKeptIdRef.contents->Int.toString
+       nextKeptIdRef := nextKeptIdRef.contents + 1
+       keptPartitions
+       ->Js.Array2.push({
+         ...p,
+         id,
+         latestFetchedBlock: p.latestFetchedBlock.blockNumber > targetBlockNumber
+           ? {blockNumber: targetBlockNumber, blockTimestamp: 0}
+           : p.latestFetchedBlock,
+         mutPendingQueries: rollbackPendingQueries(p.mutPendingQueries, ~targetBlockNumber),
+       })
+       ->ignore
+
+     // Non-wildcard with lfb > target: delete, collect addresses for recreation
+     | _ if p.latestFetchedBlock.blockNumber > targetBlockNumber =>
+       p.addressesByContractName->Utils.Dict.forEachWithKey((addresses, contractName) => {
+         addresses->Array.forEach(address => {
+           if (
+             !(addressesToRemove->Utils.Set.has(address)) &&
+             indexingContracts
+             ->Utils.Dict.dangerouslyGetNonOption(address->Address.toString)
+             ->Option.isSome
+           ) {
+             let registeringContracts =
+               registeringContractsByContract->Utils.Dict.getOrInsertEmptyDict(contractName)
+             registeringContracts->Js.Dict.set(
+               address->Address.toString,
+               indexingContracts->Js.Dict.unsafeGet(address->Address.toString),
+             )
+           }
+         })
+       })
+
+     // Non-wildcard with lfb <= target: keep, adjust pending queries and mergeBlock
+     | {addressesByContractName} => {
+         // Cap mergeBlock at target
+         let mergeBlock = switch p.mergeBlock {
+         | Some(mergeBlock) if mergeBlock > targetBlockNumber => Some(targetBlockNumber)
+         | other => other
+         }
+
+         // Remove addresses that should be removed
+         let rollbackedAddressesByContractName = Js.Dict.empty()
+         addressesByContractName->Utils.Dict.forEachWithKey((addresses, contractName) => {
+           let keptAddresses =
+             addresses->Array.keep(address => !(addressesToRemove->Utils.Set.has(address)))
+           if keptAddresses->Array.length > 0 {
+             rollbackedAddressesByContractName->Js.Dict.set(contractName, keptAddresses)
+           }
+         })
+
+         if rollbackedAddressesByContractName->Js.Dict.keys->Array.length > 0 {
+           let id = nextKeptIdRef.contents->Int.toString
+           nextKeptIdRef := nextKeptIdRef.contents + 1
+           keptPartitions
+           ->Js.Array2.push({
+             ...p,
+             id,
+             addressesByContractName: rollbackedAddressesByContractName,
+             mutPendingQueries: rollbackPendingQueries(p.mutPendingQueries, ~targetBlockNumber),
+             mergeBlock,
+           })
+           ->ignore
+         }
+       }
+     }
+   }
+
+   // Step 3: Recreate partitions from deleted partition addresses
+   let optimizedPartitions = createPartitionsFromIndexingAddresses(
+     ~registeringContractsByContract,
+     ~contractConfigs=fetchState.contractConfigs,
+     ~dynamicContracts=fetchState.optimizedPartitions.dynamicContracts,
+     ~normalSelection=fetchState.normalSelection,
+     ~maxAddrInPartition=fetchState.optimizedPartitions.maxAddrInPartition,
+     ~nextPartitionIndex=nextKeptIdRef.contents,
+     ~existingPartitions=keptPartitions,
+     ~progressBlockNumber=targetBlockNumber,
+   )

+   // Step 4: Update state
    {
    ...fetchState,
-
+   // TODO: Test this. Currently it's not tested.
+   latestOnBlockBlockNumber: Pervasives.min(
+     fetchState.latestOnBlockBlockNumber,
+     targetBlockNumber,
+   ),
  }->updateInternal(
- ~ …
+   ~optimizedPartitions,
    ~indexingContracts,
    ~mutItems=fetchState.buffer->Array.keep(item =>
      switch item {

@@ -1187,6 +1758,32 @@ let rollback = (fetchState: t, ~targetBlockNumber) => {
    )
  }

+ // Reset pending queries by removing in-flight queries (ones without fetchedBlock).
+ // Completed queries (with fetchedBlock) are kept so rollback can handle them.
+ // Since we can continue fetching partitions with holes, this works correctly.
+ let resetPendingQueries = (fetchState: t) => {
+   let newEntities = fetchState.optimizedPartitions.entities->Utils.Dict.shallowCopy
+
+   for idx in 0 to fetchState.optimizedPartitions.idsInAscOrder->Array.length - 1 {
+     let partitionId = fetchState.optimizedPartitions.idsInAscOrder->Js.Array2.unsafe_get(idx)
+     let partition = fetchState.optimizedPartitions.entities->Js.Dict.unsafeGet(partitionId)
+
+     if partition.mutPendingQueries->Array.length > 0 {
+       // Keep only completed queries (with fetchedBlock)
+       let kept = partition.mutPendingQueries->Array.keep(pq => pq.fetchedBlock !== None)
+       newEntities->Js.Dict.set(partitionId, {...partition, mutPendingQueries: kept})
+     }
+   }
+
+   {
+     ...fetchState,
+     optimizedPartitions: {
+       ...fetchState.optimizedPartitions,
+       entities: newEntities,
+     },
+   }
+ }
+
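Rollback treats completed pending queries as data that may extend past the reorged block, so it drops queries starting beyond the target and caps any fetched block that overshoots it. A TypeScript sketch of that filter, using a stand-in `Pending` shape like the earlier examples:

    type Pending = { fromBlock: number; fetchedBlock?: number };

    function rollbackPending(pending: Pending[], target: number): Pending[] {
      const kept: Pending[] = [];
      for (const pq of pending) {
        if (pq.fromBlock > target) continue; // started past the rollback point
        if (pq.fetchedBlock === undefined) {
          // Mirrors the invariant asserted in the diff.
          throw new Error("in-flight query during rollback");
        }
        kept.push(
          pq.fetchedBlock > target ? { ...pq, fetchedBlock: target } : pq,
        );
      }
      return kept;
    }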
  /**
  * Returns a boolean indicating whether the fetch state is actively indexing
  * used for comparing event queues in the chain manager

@@ -1204,15 +1801,12 @@ let isActivelyIndexing = ({endBlock} as fetchState: t) => {
  }
  }

- let isReadyToEnterReorgThreshold = (
-   {endBlock, blockLag, buffer} as fetchState: t,
-   ~currentBlockHeight,
- ) => {
+ let isReadyToEnterReorgThreshold = ({endBlock, blockLag, buffer, knownHeight} as fetchState: t) => {
    let bufferBlockNumber = fetchState->bufferBlockNumber
-
+   knownHeight !== 0 &&
    switch endBlock {
    | Some(endBlock) if bufferBlockNumber >= endBlock => true
-   | _ => bufferBlockNumber >= …
+   | _ => bufferBlockNumber >= knownHeight - blockLag
    } &&
    buffer->Utils.Array.isEmpty
  }

@@ -1225,26 +1819,40 @@ let sortForUnorderedBatch = {
  }
  }

+ // Lower progress percentage = further behind = higher priority
+ let getProgressPercentage = (fetchState: t) => {
+   switch fetchState.firstEventBlock {
+   | None => 0.
+   | Some(firstEventBlock) =>
+     let totalRange = fetchState.knownHeight - firstEventBlock
+     if totalRange <= 0 {
+       0.
+     } else {
+       let progress = switch fetchState.buffer->Belt.Array.get(0) {
+       | Some(item) => item->Internal.getItemBlockNumber - firstEventBlock
+       | None => fetchState->bufferBlockNumber - firstEventBlock
+       }
+       progress->Int.toFloat /. totalRange->Int.toFloat
+     }
+   }
+ }
+
  (fetchStates: array<t>, ~batchSizeTarget: int) => {
    fetchStates
    ->Array.copy
    ->Js.Array2.sortInPlaceWith((a: t, b: t) => {
      switch (a->hasFullBatch(~batchSizeTarget), b->hasFullBatch(~batchSizeTarget)) {
      | (true, true)
-     | (false, false) =>
-       … (9 removed lines not preserved in this rendering)
-       // Just keep them to increase the progress block number when relevant
-       | (Some(_), None) => -1
-       | (None, Some(_)) => 1
-       | (None, None) => 0
+     | (false, false) => {
+         let aProgress = a->getProgressPercentage
+         let bProgress = b->getProgressPercentage
+         if aProgress < bProgress {
+           -1
+         } else if aProgress > bProgress {
+           1
+         } else {
+           0
+         }
      }
    | (true, false) => -1
    | (false, true) => 1
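The comparator prefers chains that are proportionally further behind: progress is (current block − first event block) / (known height − first event block), using the earliest buffered item's block when the buffer is non-empty. A small TypeScript rendering of the formula (names are illustrative):

    // 0.0 = at the first event block, 1.0 = caught up to known height.
    function progressPercentage(
      knownHeight: number,
      firstEventBlock: number | undefined,
      currentBlock: number,
    ): number {
      if (firstEventBlock === undefined) return 0;
      const totalRange = knownHeight - firstEventBlock;
      if (totalRange <= 0) return 0;
      return (currentBlock - firstEventBlock) / totalRange;
    }

    // Example: halfway through the indexed range.
    // progressPercentage(2000000, 1000000, 1500000) === 0.5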
@@ -1262,3 +1870,12 @@ let getUnorderedMultichainProgressBlockNumberAt = ({buffer} as fetchState: t, ~i
  | _ => bufferBlockNumber
  }
  }
+
+ let updateKnownHeight = (fetchState: t, ~knownHeight) => {
+   if knownHeight > fetchState.knownHeight {
+     Prometheus.IndexingKnownHeight.set(~blockNumber=knownHeight, ~chainId=fetchState.chainId)
+     fetchState->updateInternal(~knownHeight)
+   } else {
+     fetchState
+   }
+ }
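`updateKnownHeight` is a monotonic setter: a lagging data source reporting a lower height is ignored, so the recorded height (and the Prometheus gauge) only ever moves forward. A minimal TypeScript model of the guard, ignoring the metrics side effect:

    // Only advance; a stale provider must not move the height backwards.
    function nextKnownHeight(current: number, reported: number): number {
      return reported > current ? reported : current;
    }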