envio 2.31.0-alpha.0 → 2.31.0-alpha.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +5 -5
- package/src/Batch.res +400 -28
- package/src/Batch.res.js +286 -24
- package/src/EventRegister.res +9 -3
- package/src/EventRegister.res.js +6 -3
- package/src/EventRegister.resi +4 -1
- package/src/FetchState.res +116 -155
- package/src/FetchState.res.js +116 -106
- package/src/Internal.res +49 -0
- package/src/InternalConfig.res +1 -1
- package/src/Persistence.res +16 -1
- package/src/Persistence.res.js +1 -1
- package/src/PgStorage.res +49 -61
- package/src/PgStorage.res.js +44 -37
- package/src/Prometheus.res +7 -1
- package/src/Prometheus.res.js +8 -1
- package/src/ReorgDetection.res +222 -235
- package/src/ReorgDetection.res.js +34 -28
- package/src/SafeCheckpointTracking.res +132 -0
- package/src/SafeCheckpointTracking.res.js +95 -0
- package/src/Utils.res +64 -21
- package/src/Utils.res.js +61 -30
- package/src/db/EntityHistory.res +172 -294
- package/src/db/EntityHistory.res.js +98 -218
- package/src/db/InternalTable.gen.ts +13 -13
- package/src/db/InternalTable.res +286 -77
- package/src/db/InternalTable.res.js +160 -79
- package/src/db/Table.res +1 -0
- package/src/db/Table.res.js +1 -1
- package/src/sources/EventRouter.res +1 -1
- package/src/sources/Source.res +1 -1
package/src/FetchState.res
CHANGED
@@ -1,24 +1,5 @@
 open Belt
 
-type dcData = {
-  registeringEventBlockTimestamp: int,
-  registeringEventLogIndex: int,
-  registeringEventContractName: string,
-  registeringEventName: string,
-  registeringEventSrcAddress: Address.t,
-}
-
-@unboxed
-type contractRegister =
-  | Config
-  | DC(dcData)
-type indexingContract = {
-  address: Address.t,
-  contractName: string,
-  startBlock: int,
-  register: contractRegister,
-}
-
 type contractConfig = {filterByAddresses: bool}
 
 type blockNumberAndTimestamp = {
@@ -56,12 +37,9 @@ type t = {
   maxAddrInPartition: int,
   normalSelection: selection,
   // By address
-  indexingContracts: dict<indexingContract>,
+  indexingContracts: dict<Internal.indexingContract>,
   // By contract name
   contractConfigs: dict<contractConfig>,
-  // Registered dynamic contracts that need to be stored in the db
-  // Should read them at the same time when getting items for the batch
-  dcsToStore: array<indexingContract>,
   // Not used for logic - only metadata
   chainId: int,
   // The block number of the latest block fetched
@@ -92,7 +70,7 @@ let mergeIntoPartition = (p: partition, ~target: partition, ~maxAddrInPartition)
 
   let allowedAddressesNumber = ref(maxAddrInPartition)
 
-  target.addressesByContractName->Utils.Dict.forEachWithKey((
+  target.addressesByContractName->Utils.Dict.forEachWithKey((addresses, contractName) => {
     allowedAddressesNumber := allowedAddressesNumber.contents - addresses->Array.length
     mergedAddresses->Js.Dict.set(contractName, addresses)
   })
@@ -100,7 +78,7 @@ let mergeIntoPartition = (p: partition, ~target: partition, ~maxAddrInPartition)
   // Start with putting all addresses to the merging dict
   // And if they exceed the limit, start removing from the merging dict
   // and putting into the rest dict
-  p.addressesByContractName->Utils.Dict.forEachWithKey((
+  p.addressesByContractName->Utils.Dict.forEachWithKey((addresses, contractName) => {
     allowedAddressesNumber := allowedAddressesNumber.contents - addresses->Array.length
     switch mergedAddresses->Utils.Dict.dangerouslyGetNonOption(contractName) {
     | Some(targetAddresses) =>
@@ -112,7 +90,7 @@ let mergeIntoPartition = (p: partition, ~target: partition, ~maxAddrInPartition)
   let rest = if allowedAddressesNumber.contents < 0 {
     let restAddresses = Js.Dict.empty()
 
-    mergedAddresses->Utils.Dict.forEachWithKey((
+    mergedAddresses->Utils.Dict.forEachWithKey((addresses, contractName) => {
       if allowedAddressesNumber.contents === 0 {
         ()
       } else if addresses->Array.length <= -allowedAddressesNumber.contents {
@@ -204,7 +182,6 @@ let updateInternal = (
   ~partitions=fetchState.partitions,
   ~nextPartitionIndex=fetchState.nextPartitionIndex,
   ~indexingContracts=fetchState.indexingContracts,
-  ~dcsToStore=fetchState.dcsToStore,
   ~mutItems=?,
   ~blockLag=fetchState.blockLag,
 ): t => {
@@ -304,7 +281,6 @@ let updateInternal = (
     latestOnBlockBlockNumber,
     latestFullyFetchedBlock,
     indexingContracts,
-    dcsToStore,
     blockLag,
     buffer: switch mutItemsRef.contents {
     // Theoretically it could be faster to asume that
@@ -333,7 +309,11 @@ let updateInternal = (
 
 let numAddresses = fetchState => fetchState.indexingContracts->Js.Dict.keys->Array.length
 
-let warnDifferentContractType = (
+let warnDifferentContractType = (
+  fetchState,
+  ~existingContract: Internal.indexingContract,
+  ~dc: Internal.indexingContract,
+) => {
   let logger = Logging.createChild(
     ~params={
       "chainId": fetchState.chainId,
@@ -347,9 +327,9 @@ let warnDifferentContractType = (fetchState, ~existingContract, ~dc: indexingCon
 
 let registerDynamicContracts = (
   fetchState: t,
-  // These are raw dynamic contracts received from contractRegister call.
+  // These are raw items which might have dynamic contracts received from contractRegister call.
   // Might contain duplicates which we should filter out
-  … (line not shown in diff view)
+  items: array<Internal.item>,
 ) => {
   if fetchState.normalSelection.eventConfigs->Utils.Array.isEmpty {
     // Can the normalSelection be empty?
@@ -361,79 +341,83 @@ let registerDynamicContracts = (
   }
 
   let indexingContracts = fetchState.indexingContracts
-  let registeringContracts = Js.Dict.empty()
+  let registeringContracts: dict<Internal.indexingContract> = Js.Dict.empty()
   let addressesByContractName = Js.Dict.empty()
   let earliestRegisteringEventBlockNumber = ref(%raw(`Infinity`))
   let hasDCWithFilterByAddresses = ref(false)
 
-  for
-  let
-  switch
-  … (34 lines not shown in diff view)
-  )
-  … (7 lines not shown in diff view)
+  for itemIdx in 0 to items->Array.length - 1 {
+    let item = items->Js.Array2.unsafe_get(itemIdx)
+    switch item->Internal.getItemDcs {
+    | None => ()
+    | Some(dcs) =>
+      for idx in 0 to dcs->Array.length - 1 {
+        let dc = dcs->Js.Array2.unsafe_get(idx)
+
+        switch fetchState.contractConfigs->Utils.Dict.dangerouslyGetNonOption(dc.contractName) {
+        | Some({filterByAddresses}) =>
+          // Prevent registering already indexing contracts
+          switch indexingContracts->Utils.Dict.dangerouslyGetNonOption(
+            dc.address->Address.toString,
+          ) {
+          | Some(existingContract) =>
+            // FIXME: Instead of filtering out duplicates,
+            // we should check the block number first.
+            // If new registration with earlier block number
+            // we should register it for the missing block range
+            if existingContract.contractName != dc.contractName {
+              fetchState->warnDifferentContractType(~existingContract, ~dc)
+            } else if existingContract.startBlock > dc.startBlock {
+              let logger = Logging.createChild(
+                ~params={
+                  "chainId": fetchState.chainId,
+                  "contractAddress": dc.address->Address.toString,
+                  "existingBlockNumber": existingContract.startBlock,
+                  "newBlockNumber": dc.startBlock,
+                },
+              )
+              logger->Logging.childWarn(`Skipping contract registration: Contract address is already registered at a later block number. Currently registration of the same contract address is not supported by Envio. Reach out to us if it's a problem for you.`)
+            }
+            // Remove the DC from item to prevent it from saving to the db
+            let _ = dcs->Js.Array2.removeCountInPlace(~count=1, ~pos=idx)
+          | None =>
+            let shouldUpdate = switch registeringContracts->Utils.Dict.dangerouslyGetNonOption(
+              dc.address->Address.toString,
+            ) {
+            | Some(registeringContract) if registeringContract.contractName != dc.contractName =>
+              fetchState->warnDifferentContractType(~existingContract=registeringContract, ~dc)
+              false
+            | Some(_) => // Since the DC is registered by an earlier item in the query
+              // FIXME: This unsafely relies on the asc order of the items
+              // which is 99% true, but there were cases when the source ordering was wrong
+              false
+            | None =>
+              hasDCWithFilterByAddresses := hasDCWithFilterByAddresses.contents || filterByAddresses
+              addressesByContractName->Utils.Dict.push(dc.contractName, dc.address)
+              true
+            }
+            if shouldUpdate {
+              earliestRegisteringEventBlockNumber :=
+                Pervasives.min(earliestRegisteringEventBlockNumber.contents, dc.startBlock)
+              registeringContracts->Js.Dict.set(dc.address->Address.toString, dc)
+            } else {
+              // Remove the DC from item to prevent it from saving to the db
+              let _ = dcs->Js.Array2.removeCountInPlace(~count=1, ~pos=idx)
+            }
+          }
+        | None => {
+            let logger = Logging.createChild(
+              ~params={
+                "chainId": fetchState.chainId,
+                "contractAddress": dc.address->Address.toString,
+                "contractName": dc.contractName,
+              },
             )
+            logger->Logging.childWarn(`Skipping contract registration: Contract doesn't have any events to fetch.`)
+            let _ = dcs->Js.Array2.removeCountInPlace(~count=1, ~pos=idx)
           }
-        | None =>
-          hasDCWithFilterByAddresses := hasDCWithFilterByAddresses.contents || filterByAddresses
-          addressesByContractName->Utils.Dict.push(dc.contractName, dc.address)
-          true
-        }
-        if shouldUpdate {
-          earliestRegisteringEventBlockNumber :=
-            Pervasives.min(earliestRegisteringEventBlockNumber.contents, dc.startBlock)
-          registeringContracts->Js.Dict.set(dc.address->Address.toString, dc)
         }
       }
-      | None => {
-        let logger = Logging.createChild(
-          ~params={
-            "chainId": fetchState.chainId,
-            "contractAddress": dc.address->Address.toString,
-            "contractName": dc.contractName,
-          },
-        )
-        logger->Logging.childWarn(`Skipping contract registration: Contract doesn't have any events to fetch.`)
-      }
     }
   }
 
@@ -568,10 +552,6 @@ let registerDynamicContracts = (
 
   fetchState->updateInternal(
     ~partitions=fetchState.partitions->Js.Array2.concat(newPartitions),
-    ~dcsToStore=switch fetchState.dcsToStore {
-    | [] => dcsToStore
-    | existingDcs => Array.concat(existingDcs, dcsToStore)
-    },
     ~indexingContracts=// We don't need registeringContracts anymore,
     // so we can safely mixin indexingContracts in it
     // The original indexingContracts won't be mutated
@@ -598,7 +578,7 @@ type query = {
   selection: selection,
   addressesByContractName: dict<array<Address.t>>,
   target: queryTarget,
-  indexingContracts: dict<indexingContract>,
+  indexingContracts: dict<Internal.indexingContract>,
 }
 
 exception UnexpectedPartitionNotFound({partitionId: string})
@@ -975,7 +955,7 @@ let make = (
   ~startBlock,
   ~endBlock,
   ~eventConfigs: array<Internal.eventConfig>,
-  ~contracts: array<indexingContract>,
+  ~contracts: array<Internal.indexingContract>,
   ~maxAddrInPartition,
   ~chainId,
   ~targetBufferSize,
@@ -1104,7 +1084,6 @@ let make = (
     latestOnBlockBlockNumber: progressBlockNumber,
     normalSelection,
     indexingContracts,
-    dcsToStore: [],
     blockLag,
     onBlockConfigs,
     targetBufferSize,
@@ -1114,31 +1093,14 @@ let make = (
 
 let bufferSize = ({buffer}: t) => buffer->Array.length
 
-let pruneQueueFromFirstChangeEvent = (
-  buffer: array<Internal.item>,
-  ~firstChangeEvent: blockNumberAndLogIndex,
-) => {
-  buffer->Array.keep(item =>
-    switch item {
-    | Event({blockNumber, logIndex})
-    | Block({blockNumber, logIndex}) => (blockNumber, logIndex)
-    } <
-    (firstChangeEvent.blockNumber, firstChangeEvent.logIndex)
-  )
-}
-
 /**
 Rolls back partitions to the given valid block
 */
-let rollbackPartition = (
-  p
-  ~firstChangeEvent: blockNumberAndLogIndex,
-  ~addressesToRemove,
-) => {
-  let shouldRollbackFetched = p.latestFetchedBlock.blockNumber >= firstChangeEvent.blockNumber
+let rollbackPartition = (p: partition, ~targetBlockNumber, ~addressesToRemove) => {
+  let shouldRollbackFetched = p.latestFetchedBlock.blockNumber > targetBlockNumber
   let latestFetchedBlock = shouldRollbackFetched
     ? {
-        blockNumber:
+        blockNumber: targetBlockNumber,
         blockTimestamp: 0,
       }
     : p.latestFetchedBlock
@@ -1153,7 +1115,7 @@ let rollbackPartition = (
   })
   | {addressesByContractName} =>
     let rollbackedAddressesByContractName = Js.Dict.empty()
-    addressesByContractName->Utils.Dict.forEachWithKey((
+    addressesByContractName->Utils.Dict.forEachWithKey((addresses, contractName) => {
       let keptAddresses =
         addresses->Array.keep(address => !(addressesToRemove->Utils.Set.has(address)))
       if keptAddresses->Array.length > 0 {
@@ -1177,7 +1139,7 @@ let rollbackPartition = (
   }
 }
 
-let rollback = (fetchState: t, ~
+let rollback = (fetchState: t, ~targetBlockNumber) => {
   let addressesToRemove = Utils.Set.make()
   let indexingContracts = Js.Dict.empty()
 
@@ -1185,40 +1147,34 @@ let rollback = (fetchState: t, ~firstChangeEvent) => {
   ->Js.Dict.keys
   ->Array.forEach(address => {
     let indexingContract = fetchState.indexingContracts->Js.Dict.unsafeGet(address)
-    … (5 lines not shown in diff view)
-      (indexingContract.startBlock === firstChangeEvent.blockNumber &&
-        dc.registeringEventLogIndex < firstChangeEvent.logIndex)
+    switch indexingContract.registrationBlock {
+    | Some(registrationBlock) if registrationBlock > targetBlockNumber => {
+        //If the registration block is later than the first change event,
+        //Do not keep it and add to the removed addresses
+        let _ = addressesToRemove->Utils.Set.add(address->Address.unsafeFromString)
       }
-    )
-      indexingContracts->Js.Dict.set(address, indexingContract)
-    } else {
-      //If the registration block is later than the first change event,
-      //Do not keep it and add to the removed addresses
-      let _ = addressesToRemove->Utils.Set.add(address->Address.unsafeFromString)
+    | _ => indexingContracts->Js.Dict.set(address, indexingContract)
     }
   })
 
   let partitions =
     fetchState.partitions->Array.keepMap(p =>
-      p->rollbackPartition(~
+      p->rollbackPartition(~targetBlockNumber, ~addressesToRemove)
     )
 
   {
     ...fetchState,
-    latestOnBlockBlockNumber:
+    latestOnBlockBlockNumber: targetBlockNumber, // TODO: This is not tested. I assume there might be a possible issue of it skipping some blocks
   }->updateInternal(
     ~partitions,
     ~indexingContracts,
-    ~mutItems=fetchState.buffer->
-    … (5 lines not shown in diff view)
+    ~mutItems=fetchState.buffer->Array.keep(item =>
+      switch item {
+      | Event({blockNumber})
+      | Block({blockNumber}) => blockNumber
+      } <=
+        targetBlockNumber
+    ),
   )
 }
 
@@ -1252,7 +1208,7 @@ let isReadyToEnterReorgThreshold = (
   buffer->Utils.Array.isEmpty
 }
 
-let
+let sortForUnorderedBatch = {
   let hasFullBatch = ({buffer} as fetchState: t, ~batchSizeTarget) => {
     switch buffer->Belt.Array.get(batchSizeTarget - 1) {
     | Some(item) => item->Internal.getItemBlockNumber <= fetchState->bufferBlockNumber
@@ -1262,20 +1218,24 @@ let filterAndSortForUnorderedBatch = {
 
   (fetchStates: array<t>, ~batchSizeTarget: int) => {
     fetchStates
-    ->Array.
+    ->Array.copy
     ->Js.Array2.sortInPlaceWith((a: t, b: t) => {
       switch (a->hasFullBatch(~batchSizeTarget), b->hasFullBatch(~batchSizeTarget)) {
       | (true, true)
       | (false, false) =>
-        … (2 lines not shown in diff view)
-        | (Event({timestamp: aTimestamp}), Event({timestamp: bTimestamp})) =>
+        switch (a.buffer->Belt.Array.get(0), b.buffer->Belt.Array.get(0)) {
+        | (Some(Event({timestamp: aTimestamp})), Some(Event({timestamp: bTimestamp}))) =>
           aTimestamp - bTimestamp
-        | (Block(_), _)
-        | (_, Block(_)) =>
+        | (Some(Block(_)), _)
+        | (_, Some(Block(_))) =>
           // Currently block items don't have a timestamp,
           // so we sort chains with them in a random order
           Js.Math.random_int(-1, 1)
+        // We don't care about the order of chains with no items
+        // Just keep them to increase the progress block number when relevant
+        | (Some(_), None) => -1
+        | (None, Some(_)) => 1
+        | (None, None) => 0
         }
       | (true, false) => -1
       | (false, true) => 1
@@ -1284,9 +1244,10 @@ let filterAndSortForUnorderedBatch = {
   }
 }
 
-… (line not shown in diff view)
+// Ordered multichain mode can't skip blocks, even if there are no items.
+let getUnorderedMultichainProgressBlockNumberAt = ({buffer} as fetchState: t, ~index) => {
   let bufferBlockNumber = fetchState->bufferBlockNumber
-  switch buffer->Belt.Array.get(
+  switch buffer->Belt.Array.get(index) {
   | Some(item) if bufferBlockNumber >= item->Internal.getItemBlockNumber =>
     item->Internal.getItemBlockNumber - 1
   | _ => bufferBlockNumber
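
Taken together, the FetchState hunks above replace the `~firstChangeEvent` (block number plus log index) rollback plumbing with a plain `~targetBlockNumber`, and drop `dcsToStore` in favour of dynamic contracts carried on the items themselves. A minimal caller-side sketch of the new rollback entry point, written in ReScript; the `handleReorg` wrapper and `safeBlockNumber` value are illustrative assumptions, not code from the package:

    // Hypothetical sketch only. Per the diff above, `rollback` now keeps buffer
    // items whose block number is <= targetBlockNumber and removes contracts
    // whose registrationBlock is greater than it.
    let handleReorg = (fetchState: FetchState.t, ~safeBlockNumber: int) =>
      fetchState->FetchState.rollback(~targetBlockNumber=safeBlockNumber)

The partition-level counterpart shown in the diff, `rollbackPartition(p, ~targetBlockNumber, ~addressesToRemove)`, follows the same convention, so callers only need a single safe block number rather than a first-change event.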