@across-protocol/sdk 3.1.27 → 3.1.28
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cjs/clients/BundleDataClient/BundleDataClient.d.ts +60 -0
- package/dist/cjs/clients/BundleDataClient/BundleDataClient.js +929 -0
- package/dist/cjs/clients/BundleDataClient/BundleDataClient.js.map +1 -0
- package/dist/cjs/clients/BundleDataClient/index.d.ts +2 -0
- package/dist/cjs/clients/BundleDataClient/index.js +6 -0
- package/dist/cjs/clients/BundleDataClient/index.js.map +1 -0
- package/dist/cjs/clients/BundleDataClient/utils/DataworkerUtils.d.ts +15 -0
- package/dist/cjs/clients/BundleDataClient/utils/DataworkerUtils.js +131 -0
- package/dist/cjs/clients/BundleDataClient/utils/DataworkerUtils.js.map +1 -0
- package/dist/cjs/clients/BundleDataClient/utils/FillUtils.d.ts +6 -0
- package/dist/cjs/clients/BundleDataClient/utils/FillUtils.js +19 -0
- package/dist/cjs/clients/BundleDataClient/utils/FillUtils.js.map +1 -0
- package/dist/cjs/clients/BundleDataClient/utils/MerkleTreeUtils.d.ts +3 -0
- package/dist/cjs/clients/BundleDataClient/utils/MerkleTreeUtils.js +23 -0
- package/dist/cjs/clients/BundleDataClient/utils/MerkleTreeUtils.js.map +1 -0
- package/dist/cjs/clients/BundleDataClient/utils/PoolRebalanceUtils.d.ts +24 -0
- package/dist/cjs/clients/BundleDataClient/utils/PoolRebalanceUtils.js +129 -0
- package/dist/cjs/clients/BundleDataClient/utils/PoolRebalanceUtils.js.map +1 -0
- package/dist/cjs/clients/BundleDataClient/utils/SuperstructUtils.d.ts +302 -0
- package/dist/cjs/clients/BundleDataClient/utils/SuperstructUtils.js +77 -0
- package/dist/cjs/clients/BundleDataClient/utils/SuperstructUtils.js.map +1 -0
- package/dist/cjs/clients/BundleDataClient/utils/index.d.ts +6 -0
- package/dist/cjs/clients/BundleDataClient/utils/index.js +10 -0
- package/dist/cjs/clients/BundleDataClient/utils/index.js.map +1 -0
- package/dist/cjs/clients/BundleDataClient/utils/shims.d.ts +8 -0
- package/dist/cjs/clients/BundleDataClient/utils/shims.js +3 -0
- package/dist/cjs/clients/BundleDataClient/utils/shims.js.map +1 -0
- package/dist/cjs/clients/index.d.ts +1 -0
- package/dist/cjs/clients/index.js +2 -1
- package/dist/cjs/clients/index.js.map +1 -1
- package/dist/cjs/interfaces/BundleData.d.ts +63 -0
- package/dist/cjs/interfaces/BundleData.js +3 -0
- package/dist/cjs/interfaces/BundleData.js.map +1 -0
- package/dist/cjs/interfaces/index.d.ts +1 -0
- package/dist/cjs/interfaces/index.js +1 -0
- package/dist/cjs/interfaces/index.js.map +1 -1
- package/dist/cjs/providers/alchemy.js +5 -5
- package/dist/cjs/providers/alchemy.js.map +1 -1
- package/dist/cjs/providers/drpc.d.ts +2 -0
- package/dist/cjs/providers/drpc.js +21 -0
- package/dist/cjs/providers/drpc.js.map +1 -0
- package/dist/cjs/providers/infura.js +5 -5
- package/dist/cjs/providers/infura.js.map +1 -1
- package/dist/cjs/providers/types.d.ts +1 -1
- package/dist/cjs/providers/utils.js +3 -1
- package/dist/cjs/providers/utils.js.map +1 -1
- package/dist/cjs/utils/AddressUtils.d.ts +1 -0
- package/dist/cjs/utils/AddressUtils.js +15 -1
- package/dist/cjs/utils/AddressUtils.js.map +1 -1
- package/dist/cjs/utils/ContractUtils.d.ts +1 -0
- package/dist/cjs/utils/ContractUtils.js +12 -0
- package/dist/cjs/utils/ContractUtils.js.map +1 -0
- package/dist/cjs/utils/Multicall.d.ts +3 -2
- package/dist/cjs/utils/Multicall.js +40 -41
- package/dist/cjs/utils/Multicall.js.map +1 -1
- package/dist/cjs/utils/ObjectUtils.d.ts +18 -0
- package/dist/cjs/utils/ObjectUtils.js +27 -1
- package/dist/cjs/utils/ObjectUtils.js.map +1 -1
- package/dist/cjs/utils/abi/contracts/index.d.ts +1 -0
- package/dist/cjs/utils/abi/contracts/index.js +9 -0
- package/dist/cjs/utils/abi/contracts/index.js.map +1 -0
- package/dist/cjs/utils/abi/typechain/Multicall3.d.ts +289 -0
- package/dist/cjs/utils/abi/typechain/Multicall3.js +3 -0
- package/dist/cjs/utils/abi/typechain/Multicall3.js.map +1 -0
- package/dist/cjs/utils/abi/typechain/common.d.ts +21 -0
- package/dist/cjs/utils/abi/typechain/common.js +3 -0
- package/dist/cjs/utils/abi/typechain/common.js.map +1 -0
- package/dist/cjs/utils/abi/typechain/factories/Multicall3__factory.d.ts +339 -0
- package/dist/cjs/utils/abi/typechain/factories/Multicall3__factory.js +458 -0
- package/dist/cjs/utils/abi/typechain/factories/Multicall3__factory.js.map +1 -0
- package/dist/cjs/utils/abi/typechain/factories/index.d.ts +1 -0
- package/dist/cjs/utils/abi/typechain/factories/index.js +6 -0
- package/dist/cjs/utils/abi/typechain/factories/index.js.map +1 -0
- package/dist/cjs/utils/abi/typechain/index.d.ts +3 -0
- package/dist/cjs/utils/abi/typechain/index.js +8 -0
- package/dist/cjs/utils/abi/typechain/index.js.map +1 -0
- package/dist/esm/clients/BundleDataClient/BundleDataClient.d.ts +60 -0
- package/dist/esm/clients/BundleDataClient/BundleDataClient.js +1097 -0
- package/dist/esm/clients/BundleDataClient/BundleDataClient.js.map +1 -0
- package/dist/esm/clients/BundleDataClient/index.d.ts +2 -0
- package/dist/esm/clients/BundleDataClient/index.js +3 -0
- package/dist/esm/clients/BundleDataClient/index.js.map +1 -0
- package/dist/esm/clients/BundleDataClient/utils/DataworkerUtils.d.ts +15 -0
- package/dist/esm/clients/BundleDataClient/utils/DataworkerUtils.js +182 -0
- package/dist/esm/clients/BundleDataClient/utils/DataworkerUtils.js.map +1 -0
- package/dist/esm/clients/BundleDataClient/utils/FillUtils.d.ts +6 -0
- package/dist/esm/clients/BundleDataClient/utils/FillUtils.js +20 -0
- package/dist/esm/clients/BundleDataClient/utils/FillUtils.js.map +1 -0
- package/dist/esm/clients/BundleDataClient/utils/MerkleTreeUtils.d.ts +3 -0
- package/dist/esm/clients/BundleDataClient/utils/MerkleTreeUtils.js +20 -0
- package/dist/esm/clients/BundleDataClient/utils/MerkleTreeUtils.js.map +1 -0
- package/dist/esm/clients/BundleDataClient/utils/PoolRebalanceUtils.d.ts +24 -0
- package/dist/esm/clients/BundleDataClient/utils/PoolRebalanceUtils.js +157 -0
- package/dist/esm/clients/BundleDataClient/utils/PoolRebalanceUtils.js.map +1 -0
- package/dist/esm/clients/BundleDataClient/utils/SuperstructUtils.d.ts +302 -0
- package/dist/esm/clients/BundleDataClient/utils/SuperstructUtils.js +79 -0
- package/dist/esm/clients/BundleDataClient/utils/SuperstructUtils.js.map +1 -0
- package/dist/esm/clients/BundleDataClient/utils/index.d.ts +6 -0
- package/dist/esm/clients/BundleDataClient/utils/index.js +7 -0
- package/dist/esm/clients/BundleDataClient/utils/index.js.map +1 -0
- package/dist/esm/clients/BundleDataClient/utils/shims.d.ts +8 -0
- package/dist/esm/clients/BundleDataClient/utils/shims.js +2 -0
- package/dist/esm/clients/BundleDataClient/utils/shims.js.map +1 -0
- package/dist/esm/clients/index.d.ts +1 -0
- package/dist/esm/clients/index.js +2 -0
- package/dist/esm/clients/index.js.map +1 -1
- package/dist/esm/interfaces/BundleData.d.ts +63 -0
- package/dist/esm/interfaces/BundleData.js +2 -0
- package/dist/esm/interfaces/BundleData.js.map +1 -0
- package/dist/esm/interfaces/index.d.ts +1 -0
- package/dist/esm/interfaces/index.js +1 -0
- package/dist/esm/interfaces/index.js.map +1 -1
- package/dist/esm/providers/alchemy.js +6 -6
- package/dist/esm/providers/alchemy.js.map +1 -1
- package/dist/esm/providers/drpc.d.ts +2 -0
- package/dist/esm/providers/drpc.js +18 -0
- package/dist/esm/providers/drpc.js.map +1 -0
- package/dist/esm/providers/infura.js +6 -6
- package/dist/esm/providers/infura.js.map +1 -1
- package/dist/esm/providers/types.d.ts +1 -1
- package/dist/esm/providers/utils.js +3 -1
- package/dist/esm/providers/utils.js.map +1 -1
- package/dist/esm/utils/AddressUtils.d.ts +1 -0
- package/dist/esm/utils/AddressUtils.js +16 -1
- package/dist/esm/utils/AddressUtils.js.map +1 -1
- package/dist/esm/utils/ContractUtils.d.ts +1 -0
- package/dist/esm/utils/ContractUtils.js +8 -0
- package/dist/esm/utils/ContractUtils.js.map +1 -0
- package/dist/esm/utils/Multicall.d.ts +3 -2
- package/dist/esm/utils/Multicall.js +39 -42
- package/dist/esm/utils/Multicall.js.map +1 -1
- package/dist/esm/utils/ObjectUtils.d.ts +18 -0
- package/dist/esm/utils/ObjectUtils.js +24 -0
- package/dist/esm/utils/ObjectUtils.js.map +1 -1
- package/dist/esm/utils/abi/contracts/index.d.ts +1 -0
- package/dist/esm/utils/abi/contracts/index.js +2 -0
- package/dist/esm/utils/abi/contracts/index.js.map +1 -0
- package/dist/esm/utils/abi/typechain/Multicall3.d.ts +292 -0
- package/dist/esm/utils/abi/typechain/Multicall3.js +2 -0
- package/dist/esm/utils/abi/typechain/Multicall3.js.map +1 -0
- package/dist/esm/utils/abi/typechain/common.d.ts +21 -0
- package/dist/esm/utils/abi/typechain/common.js +2 -0
- package/dist/esm/utils/abi/typechain/common.js.map +1 -0
- package/dist/esm/utils/abi/typechain/factories/Multicall3__factory.d.ts +339 -0
- package/dist/esm/utils/abi/typechain/factories/Multicall3__factory.js +458 -0
- package/dist/esm/utils/abi/typechain/factories/Multicall3__factory.js.map +1 -0
- package/dist/esm/utils/abi/typechain/factories/index.d.ts +1 -0
- package/dist/esm/utils/abi/typechain/factories/index.js +5 -0
- package/dist/esm/utils/abi/typechain/factories/index.js.map +1 -0
- package/dist/esm/utils/abi/typechain/index.d.ts +3 -0
- package/dist/esm/utils/abi/typechain/index.js +4 -0
- package/dist/esm/utils/abi/typechain/index.js.map +1 -0
- package/dist/types/clients/BundleDataClient/BundleDataClient.d.ts +61 -0
- package/dist/types/clients/BundleDataClient/BundleDataClient.d.ts.map +1 -0
- package/dist/types/clients/BundleDataClient/index.d.ts +3 -0
- package/dist/types/clients/BundleDataClient/index.d.ts.map +1 -0
- package/dist/types/clients/BundleDataClient/utils/DataworkerUtils.d.ts +16 -0
- package/dist/types/clients/BundleDataClient/utils/DataworkerUtils.d.ts.map +1 -0
- package/dist/types/clients/BundleDataClient/utils/FillUtils.d.ts +7 -0
- package/dist/types/clients/BundleDataClient/utils/FillUtils.d.ts.map +1 -0
- package/dist/types/clients/BundleDataClient/utils/MerkleTreeUtils.d.ts +4 -0
- package/dist/types/clients/BundleDataClient/utils/MerkleTreeUtils.d.ts.map +1 -0
- package/dist/types/clients/BundleDataClient/utils/PoolRebalanceUtils.d.ts +25 -0
- package/dist/types/clients/BundleDataClient/utils/PoolRebalanceUtils.d.ts.map +1 -0
- package/dist/types/clients/BundleDataClient/utils/SuperstructUtils.d.ts +303 -0
- package/dist/types/clients/BundleDataClient/utils/SuperstructUtils.d.ts.map +1 -0
- package/dist/types/clients/BundleDataClient/utils/index.d.ts +7 -0
- package/dist/types/clients/BundleDataClient/utils/index.d.ts.map +1 -0
- package/dist/types/clients/BundleDataClient/utils/shims.d.ts +9 -0
- package/dist/types/clients/BundleDataClient/utils/shims.d.ts.map +1 -0
- package/dist/types/clients/index.d.ts +1 -0
- package/dist/types/clients/index.d.ts.map +1 -1
- package/dist/types/interfaces/BundleData.d.ts +64 -0
- package/dist/types/interfaces/BundleData.d.ts.map +1 -0
- package/dist/types/interfaces/index.d.ts +1 -0
- package/dist/types/interfaces/index.d.ts.map +1 -1
- package/dist/types/providers/drpc.d.ts +3 -0
- package/dist/types/providers/drpc.d.ts.map +1 -0
- package/dist/types/providers/types.d.ts +1 -1
- package/dist/types/providers/types.d.ts.map +1 -1
- package/dist/types/providers/utils.d.ts.map +1 -1
- package/dist/types/utils/AddressUtils.d.ts +1 -0
- package/dist/types/utils/AddressUtils.d.ts.map +1 -1
- package/dist/types/utils/ContractUtils.d.ts +2 -0
- package/dist/types/utils/ContractUtils.d.ts.map +1 -0
- package/dist/types/utils/Multicall.d.ts +3 -2
- package/dist/types/utils/Multicall.d.ts.map +1 -1
- package/dist/types/utils/ObjectUtils.d.ts +18 -0
- package/dist/types/utils/ObjectUtils.d.ts.map +1 -1
- package/dist/types/utils/abi/contracts/index.d.ts +2 -0
- package/dist/types/utils/abi/contracts/index.d.ts.map +1 -0
- package/dist/types/utils/abi/typechain/Multicall3.d.ts +293 -0
- package/dist/types/utils/abi/typechain/Multicall3.d.ts.map +1 -0
- package/dist/types/utils/abi/typechain/common.d.ts +22 -0
- package/dist/types/utils/abi/typechain/common.d.ts.map +1 -0
- package/dist/types/utils/abi/typechain/factories/Multicall3__factory.d.ts +340 -0
- package/dist/types/utils/abi/typechain/factories/Multicall3__factory.d.ts.map +1 -0
- package/dist/types/utils/abi/typechain/factories/index.d.ts +2 -0
- package/dist/types/utils/abi/typechain/factories/index.d.ts.map +1 -0
- package/dist/types/utils/abi/typechain/index.d.ts +4 -0
- package/dist/types/utils/abi/typechain/index.d.ts.map +1 -0
- package/package.json +6 -8
- package/src/clients/BundleDataClient/BundleDataClient.ts +1311 -0
- package/src/clients/BundleDataClient/index.ts +2 -0
- package/src/clients/BundleDataClient/utils/DataworkerUtils.ts +268 -0
- package/src/clients/BundleDataClient/utils/FillUtils.ts +46 -0
- package/src/clients/BundleDataClient/utils/MerkleTreeUtils.ts +26 -0
- package/src/clients/BundleDataClient/utils/PoolRebalanceUtils.ts +238 -0
- package/src/clients/BundleDataClient/utils/SuperstructUtils.ts +132 -0
- package/src/clients/BundleDataClient/utils/index.ts +6 -0
- package/src/clients/BundleDataClient/utils/shims.ts +10 -0
- package/src/clients/index.ts +1 -0
- package/src/interfaces/BundleData.ts +68 -0
- package/src/interfaces/index.ts +1 -0
- package/src/providers/alchemy.ts +6 -6
- package/src/providers/drpc.ts +19 -0
- package/src/providers/infura.ts +6 -6
- package/src/providers/types.ts +1 -1
- package/src/providers/utils.ts +3 -1
- package/src/utils/AddressUtils.ts +15 -1
- package/src/utils/ContractUtils.ts +8 -0
- package/src/utils/Multicall.ts +38 -30
- package/src/utils/ObjectUtils.ts +23 -0
- package/src/utils/abi/contracts/index.ts +1 -0
- package/src/utils/abi/typechain/Multicall3.ts +433 -0
- package/src/utils/abi/typechain/common.ts +30 -0
- package/src/utils/abi/typechain/factories/Multicall3__factory.ts +458 -0
- package/src/utils/abi/typechain/factories/index.ts +4 -0
- package/src/utils/abi/typechain/index.ts +6 -0
|
@@ -0,0 +1,1311 @@
|
|
|
1
|
+
import _ from "lodash";
|
|
2
|
+
import {
|
|
3
|
+
ProposedRootBundle,
|
|
4
|
+
SlowFillRequestWithBlock,
|
|
5
|
+
SpokePoolClientsByChain,
|
|
6
|
+
FillType,
|
|
7
|
+
FillStatus,
|
|
8
|
+
LoadDataReturnValue,
|
|
9
|
+
BundleDepositsV3,
|
|
10
|
+
BundleExcessSlowFills,
|
|
11
|
+
BundleFillsV3,
|
|
12
|
+
BundleFillV3,
|
|
13
|
+
BundleSlowFills,
|
|
14
|
+
ExpiredDepositsToRefundV3,
|
|
15
|
+
Clients,
|
|
16
|
+
CombinedRefunds,
|
|
17
|
+
} from "../../interfaces";
|
|
18
|
+
import { AcrossConfigStoreClient, SpokePoolClient } from "..";
|
|
19
|
+
import {
|
|
20
|
+
bnZero,
|
|
21
|
+
queryHistoricalDepositForFill,
|
|
22
|
+
assign,
|
|
23
|
+
assert,
|
|
24
|
+
fixedPointAdjustment,
|
|
25
|
+
isDefined,
|
|
26
|
+
toBN,
|
|
27
|
+
forEachAsync,
|
|
28
|
+
getBlockRangeForChain,
|
|
29
|
+
getImpliedBundleBlockRanges,
|
|
30
|
+
isSlowFill,
|
|
31
|
+
mapAsync,
|
|
32
|
+
relayFillStatus,
|
|
33
|
+
bnUint32Max,
|
|
34
|
+
} from "../../utils";
|
|
35
|
+
import { BigNumber } from "ethers";
|
|
36
|
+
import winston from "winston";
|
|
37
|
+
import {
|
|
38
|
+
_buildPoolRebalanceRoot,
|
|
39
|
+
BundleDataSS,
|
|
40
|
+
getEndBlockBuffers,
|
|
41
|
+
getRefundInformationFromFill,
|
|
42
|
+
getRefundsFromBundle,
|
|
43
|
+
getWidestPossibleExpectedBlockRange,
|
|
44
|
+
isChainDisabled,
|
|
45
|
+
PoolRebalanceRoot,
|
|
46
|
+
prettyPrintV3SpokePoolEvents,
|
|
47
|
+
V3DepositWithBlock,
|
|
48
|
+
V3FillWithBlock,
|
|
49
|
+
} from "./utils";
|
|
50
|
+
|
|
51
|
+
// max(uint256) - 1
|
|
52
|
+
export const INFINITE_FILL_DEADLINE = bnUint32Max;
|
|
53
|
+
|
|
54
|
+
type DataCache = Record<string, Promise<LoadDataReturnValue>>;
|
|
55
|
+
|
|
56
|
+
// V3 dictionary helper functions
|
|
57
|
+
function updateExpiredDepositsV3(dict: ExpiredDepositsToRefundV3, deposit: V3DepositWithBlock): void {
|
|
58
|
+
const { originChainId, inputToken } = deposit;
|
|
59
|
+
if (!dict?.[originChainId]?.[inputToken]) {
|
|
60
|
+
assign(dict, [originChainId, inputToken], []);
|
|
61
|
+
}
|
|
62
|
+
dict[originChainId][inputToken].push(deposit);
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
function updateBundleDepositsV3(dict: BundleDepositsV3, deposit: V3DepositWithBlock): void {
|
|
66
|
+
const { originChainId, inputToken } = deposit;
|
|
67
|
+
if (!dict?.[originChainId]?.[inputToken]) {
|
|
68
|
+
assign(dict, [originChainId, inputToken], []);
|
|
69
|
+
}
|
|
70
|
+
dict[originChainId][inputToken].push(deposit);
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
function updateBundleFillsV3(
|
|
74
|
+
dict: BundleFillsV3,
|
|
75
|
+
fill: V3FillWithBlock,
|
|
76
|
+
lpFeePct: BigNumber,
|
|
77
|
+
repaymentChainId: number,
|
|
78
|
+
repaymentToken: string
|
|
79
|
+
): void {
|
|
80
|
+
if (!dict?.[repaymentChainId]?.[repaymentToken]) {
|
|
81
|
+
assign(dict, [repaymentChainId, repaymentToken], {
|
|
82
|
+
fills: [],
|
|
83
|
+
totalRefundAmount: bnZero,
|
|
84
|
+
realizedLpFees: bnZero,
|
|
85
|
+
refunds: {},
|
|
86
|
+
});
|
|
87
|
+
}
|
|
88
|
+
|
|
89
|
+
const bundleFill: BundleFillV3 = { ...fill, lpFeePct };
|
|
90
|
+
|
|
91
|
+
// Add all fills, slow and fast, to dictionary.
|
|
92
|
+
assign(dict, [repaymentChainId, repaymentToken, "fills"], [bundleFill]);
|
|
93
|
+
|
|
94
|
+
// All fills update the bundle LP fees.
|
|
95
|
+
const refundObj = dict[repaymentChainId][repaymentToken];
|
|
96
|
+
const realizedLpFee = fill.inputAmount.mul(bundleFill.lpFeePct).div(fixedPointAdjustment);
|
|
97
|
+
refundObj.realizedLpFees = refundObj.realizedLpFees ? refundObj.realizedLpFees.add(realizedLpFee) : realizedLpFee;
|
|
98
|
+
|
|
99
|
+
// Only fast fills get refunded.
|
|
100
|
+
if (!isSlowFill(fill)) {
|
|
101
|
+
const refundAmount = fill.inputAmount.mul(fixedPointAdjustment.sub(lpFeePct)).div(fixedPointAdjustment);
|
|
102
|
+
refundObj.totalRefundAmount = refundObj.totalRefundAmount
|
|
103
|
+
? refundObj.totalRefundAmount.add(refundAmount)
|
|
104
|
+
: refundAmount;
|
|
105
|
+
|
|
106
|
+
// Instantiate dictionary if it doesn't exist.
|
|
107
|
+
refundObj.refunds ??= {};
|
|
108
|
+
|
|
109
|
+
if (refundObj.refunds[fill.relayer]) {
|
|
110
|
+
refundObj.refunds[fill.relayer] = refundObj.refunds[fill.relayer].add(refundAmount);
|
|
111
|
+
} else {
|
|
112
|
+
refundObj.refunds[fill.relayer] = refundAmount;
|
|
113
|
+
}
|
|
114
|
+
}
|
|
115
|
+
}
|
|
116
|
+
|
|
117
|
+
function updateBundleExcessSlowFills(
|
|
118
|
+
dict: BundleExcessSlowFills,
|
|
119
|
+
deposit: V3DepositWithBlock & { lpFeePct: BigNumber }
|
|
120
|
+
): void {
|
|
121
|
+
const { destinationChainId, outputToken } = deposit;
|
|
122
|
+
if (!dict?.[destinationChainId]?.[outputToken]) {
|
|
123
|
+
assign(dict, [destinationChainId, outputToken], []);
|
|
124
|
+
}
|
|
125
|
+
dict[destinationChainId][outputToken].push(deposit);
|
|
126
|
+
}
|
|
127
|
+
|
|
128
|
+
function updateBundleSlowFills(dict: BundleSlowFills, deposit: V3DepositWithBlock & { lpFeePct: BigNumber }): void {
|
|
129
|
+
const { destinationChainId, outputToken } = deposit;
|
|
130
|
+
if (!dict?.[destinationChainId]?.[outputToken]) {
|
|
131
|
+
assign(dict, [destinationChainId, outputToken], []);
|
|
132
|
+
}
|
|
133
|
+
dict[destinationChainId][outputToken].push(deposit);
|
|
134
|
+
}
|
|
135
|
+
|
|
136
|
+
// @notice Shared client for computing data needed to construct or validate a bundle.
|
|
137
|
+
export class BundleDataClient {
|
|
138
|
+
private loadDataCache: DataCache = {};
|
|
139
|
+
private arweaveDataCache: Record<string, Promise<LoadDataReturnValue | undefined>> = {};
|
|
140
|
+
|
|
141
|
+
private bundleTimestampCache: Record<string, { [chainId: number]: number[] }> = {};
|
|
142
|
+
|
|
143
|
+
// eslint-disable-next-line no-useless-constructor
|
|
144
|
+
constructor(
|
|
145
|
+
readonly logger: winston.Logger,
|
|
146
|
+
readonly clients: Clients,
|
|
147
|
+
readonly spokePoolClients: { [chainId: number]: SpokePoolClient },
|
|
148
|
+
readonly chainIdListForBundleEvaluationBlockNumbers: number[],
|
|
149
|
+
readonly blockRangeEndBlockBuffer: { [chainId: number]: number } = {}
|
|
150
|
+
) {}
|
|
151
|
+
|
|
152
|
+
// This should be called whenever it's possible that the loadData information for a block range could have changed.
|
|
153
|
+
// For instance, if the spoke or hub clients have been updated, it probably makes sense to clear this to be safe.
|
|
154
|
+
clearCache(): void {
|
|
155
|
+
this.loadDataCache = {};
|
|
156
|
+
}
|
|
157
|
+
|
|
158
|
+
private async loadDataFromCache(key: string): Promise<LoadDataReturnValue> {
|
|
159
|
+
// Always return a deep cloned copy of object stored in cache. Since JS passes by reference instead of value, we
|
|
160
|
+
// want to minimize the risk that the programmer accidentally mutates data in the cache.
|
|
161
|
+
return _.cloneDeep(await this.loadDataCache[key]);
|
|
162
|
+
}
|
|
163
|
+
|
|
164
|
+
getBundleTimestampsFromCache(key: string): undefined | { [chainId: number]: number[] } {
|
|
165
|
+
if (this.bundleTimestampCache[key]) {
|
|
166
|
+
return _.cloneDeep(this.bundleTimestampCache[key]);
|
|
167
|
+
}
|
|
168
|
+
return undefined;
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
setBundleTimestampsInCache(key: string, timestamps: { [chainId: number]: number[] }): void {
|
|
172
|
+
this.bundleTimestampCache[key] = timestamps;
|
|
173
|
+
}
|
|
174
|
+
|
|
175
|
+
private getArweaveClientKey(blockRangesForChains: number[][]): string {
|
|
176
|
+
return `bundles-${blockRangesForChains}`;
|
|
177
|
+
}
|
|
178
|
+
|
|
179
|
+
private async loadPersistedDataFromArweave(
|
|
180
|
+
blockRangesForChains: number[][]
|
|
181
|
+
): Promise<LoadDataReturnValue | undefined> {
|
|
182
|
+
if (!isDefined(this.clients?.arweaveClient)) {
|
|
183
|
+
return undefined;
|
|
184
|
+
}
|
|
185
|
+
const start = performance.now();
|
|
186
|
+
const persistedData = await this.clients.arweaveClient.getByTopic(
|
|
187
|
+
this.getArweaveClientKey(blockRangesForChains),
|
|
188
|
+
BundleDataSS
|
|
189
|
+
);
|
|
190
|
+
// If there is no data or the data is empty, return undefined because we couldn't
|
|
191
|
+
// pull info from the Arweave persistence layer.
|
|
192
|
+
if (!isDefined(persistedData) || persistedData.length < 1) {
|
|
193
|
+
return undefined;
|
|
194
|
+
}
|
|
195
|
+
|
|
196
|
+
// A converter function to account for the fact that our SuperStruct schema does not support numeric
|
|
197
|
+
// keys in records. Fundamentally, this is a limitation of superstruct itself.
|
|
198
|
+
const convertTypedStringRecordIntoNumericRecord = <UnderlyingType>(
|
|
199
|
+
data: Record<string, Record<string, UnderlyingType>>
|
|
200
|
+
): Record<number, Record<string, UnderlyingType>> =>
|
|
201
|
+
Object.keys(data).reduce(
|
|
202
|
+
(acc, chainId) => {
|
|
203
|
+
acc[Number(chainId)] = data[chainId];
|
|
204
|
+
return acc;
|
|
205
|
+
},
|
|
206
|
+
{} as Record<number, Record<string, UnderlyingType>>
|
|
207
|
+
);
|
|
208
|
+
|
|
209
|
+
const data = persistedData[0].data;
|
|
210
|
+
const bundleData = {
|
|
211
|
+
bundleFillsV3: convertTypedStringRecordIntoNumericRecord(data.bundleFillsV3),
|
|
212
|
+
expiredDepositsToRefundV3: convertTypedStringRecordIntoNumericRecord(data.expiredDepositsToRefundV3),
|
|
213
|
+
bundleDepositsV3: convertTypedStringRecordIntoNumericRecord(data.bundleDepositsV3),
|
|
214
|
+
unexecutableSlowFills: convertTypedStringRecordIntoNumericRecord(data.unexecutableSlowFills),
|
|
215
|
+
bundleSlowFillsV3: convertTypedStringRecordIntoNumericRecord(data.bundleSlowFillsV3),
|
|
216
|
+
};
|
|
217
|
+
this.logger.debug({
|
|
218
|
+
at: "BundleDataClient#loadPersistedDataFromArweave",
|
|
219
|
+
message: `Loaded persisted data from Arweave in ${Math.round(performance.now() - start) / 1000}s.`,
|
|
220
|
+
blockRanges: JSON.stringify(blockRangesForChains),
|
|
221
|
+
bundleData: prettyPrintV3SpokePoolEvents(
|
|
222
|
+
bundleData.bundleDepositsV3,
|
|
223
|
+
bundleData.bundleFillsV3,
|
|
224
|
+
[], // Invalid fills are not persisted to Arweave.
|
|
225
|
+
bundleData.bundleSlowFillsV3,
|
|
226
|
+
bundleData.expiredDepositsToRefundV3,
|
|
227
|
+
bundleData.unexecutableSlowFills
|
|
228
|
+
),
|
|
229
|
+
});
|
|
230
|
+
return bundleData;
|
|
231
|
+
}
|
|
232
|
+
|
|
233
|
+
// @dev This function should probably be moved to the InventoryClient since it bypasses loadData completely now.
|
|
234
|
+
async getPendingRefundsFromValidBundles(): Promise<CombinedRefunds[]> {
|
|
235
|
+
const refunds = [];
|
|
236
|
+
if (!this.clients.hubPoolClient.isUpdated) {
|
|
237
|
+
throw new Error("BundleDataClient::getPendingRefundsFromValidBundles HubPoolClient not updated.");
|
|
238
|
+
}
|
|
239
|
+
|
|
240
|
+
const bundle = this.clients.hubPoolClient.getLatestFullyExecutedRootBundle(
|
|
241
|
+
this.clients.hubPoolClient.latestBlockSearched
|
|
242
|
+
);
|
|
243
|
+
if (bundle !== undefined) {
|
|
244
|
+
refunds.push(await this.getPendingRefundsFromBundle(bundle));
|
|
245
|
+
} // No more valid bundles in history!
|
|
246
|
+
return refunds;
|
|
247
|
+
}
|
|
248
|
+
|
|
249
|
+
// @dev This function should probably be moved to the InventoryClient since it bypasses loadData completely now.
|
|
250
|
+
// Return refunds from input bundle.
|
|
251
|
+
async getPendingRefundsFromBundle(bundle: ProposedRootBundle): Promise<CombinedRefunds> {
|
|
252
|
+
const nextBundleMainnetStartBlock = this.clients.hubPoolClient.getNextBundleStartBlockNumber(
|
|
253
|
+
this.chainIdListForBundleEvaluationBlockNumbers,
|
|
254
|
+
this.clients.hubPoolClient.latestBlockSearched,
|
|
255
|
+
this.clients.hubPoolClient.chainId
|
|
256
|
+
);
|
|
257
|
+
const chainIds = this.clients.configStoreClient.getChainIdIndicesForBlock(nextBundleMainnetStartBlock);
|
|
258
|
+
|
|
259
|
+
// Reconstruct latest bundle block range.
|
|
260
|
+
const bundleEvaluationBlockRanges = getImpliedBundleBlockRanges(
|
|
261
|
+
this.clients.hubPoolClient,
|
|
262
|
+
this.clients.configStoreClient,
|
|
263
|
+
bundle
|
|
264
|
+
);
|
|
265
|
+
let combinedRefunds: CombinedRefunds;
|
|
266
|
+
// Here we don't call loadData because our fallback is to approximate refunds if we don't have arweave data, rather
|
|
267
|
+
// than use the much slower loadData to compute all refunds. We don't need to consider slow fills or deposit
|
|
268
|
+
// expiries here so we can skip some steps. We also don't need to compute LP fees as they should be small enough
|
|
269
|
+
// so as not to affect this approximate refund count.
|
|
270
|
+
const arweaveData = await this.loadArweaveData(bundleEvaluationBlockRanges);
|
|
271
|
+
if (arweaveData === undefined) {
|
|
272
|
+
combinedRefunds = this.getApproximateRefundsForBlockRange(chainIds, bundleEvaluationBlockRanges);
|
|
273
|
+
} else {
|
|
274
|
+
const { bundleFillsV3, expiredDepositsToRefundV3 } = arweaveData;
|
|
275
|
+
combinedRefunds = getRefundsFromBundle(bundleFillsV3, expiredDepositsToRefundV3);
|
|
276
|
+
// If we don't have a spoke pool client for a chain, then we won't be able to deduct refunds correctly for this
|
|
277
|
+
// chain. For most of the pending bundle's liveness period, these past refunds are already executed so this is
|
|
278
|
+
// a reasonable assumption. This empty refund chain also matches what the alternative
|
|
279
|
+
// `getApproximateRefundsForBlockRange` would return.
|
|
280
|
+
Object.keys(combinedRefunds).forEach((chainId) => {
|
|
281
|
+
if (this.spokePoolClients[Number(chainId)] === undefined) {
|
|
282
|
+
delete combinedRefunds[Number(chainId)];
|
|
283
|
+
}
|
|
284
|
+
});
|
|
285
|
+
}
|
|
286
|
+
|
|
287
|
+
// The latest proposed bundle's refund leaves might have already been partially or entirely executed.
|
|
288
|
+
// We have to deduct the executed amounts from the total refund amounts.
|
|
289
|
+
return this.deductExecutedRefunds(combinedRefunds, bundle);
|
|
290
|
+
}
|
|
291
|
+
|
|
292
|
+
// @dev This helper function should probably be moved to the InventoryClient
|
|
293
|
+
getApproximateRefundsForBlockRange(chainIds: number[], blockRanges: number[][]): CombinedRefunds {
|
|
294
|
+
const refundsForChain: CombinedRefunds = {};
|
|
295
|
+
for (const chainId of chainIds) {
|
|
296
|
+
if (this.spokePoolClients[chainId] === undefined) {
|
|
297
|
+
continue;
|
|
298
|
+
}
|
|
299
|
+
const chainIndex = chainIds.indexOf(chainId);
|
|
300
|
+
this.spokePoolClients[chainId]
|
|
301
|
+
.getFills()
|
|
302
|
+
.filter((fill) => {
|
|
303
|
+
if (fill.blockNumber < blockRanges[chainIndex][0] || fill.blockNumber > blockRanges[chainIndex][1]) {
|
|
304
|
+
return false;
|
|
305
|
+
}
|
|
306
|
+
|
|
307
|
+
// If origin spoke pool client isn't defined, we can't validate it.
|
|
308
|
+
if (this.spokePoolClients[fill.originChainId] === undefined) {
|
|
309
|
+
return false;
|
|
310
|
+
}
|
|
311
|
+
const matchingDeposit = this.spokePoolClients[fill.originChainId].getDeposit(fill.depositId);
|
|
312
|
+
const hasMatchingDeposit =
|
|
313
|
+
matchingDeposit !== undefined &&
|
|
314
|
+
this.getRelayHashFromEvent(fill) === this.getRelayHashFromEvent(matchingDeposit);
|
|
315
|
+
return hasMatchingDeposit;
|
|
316
|
+
})
|
|
317
|
+
.forEach((fill) => {
|
|
318
|
+
const matchingDeposit = this.spokePoolClients[fill.originChainId].getDeposit(fill.depositId);
|
|
319
|
+
assert(isDefined(matchingDeposit), "Deposit not found for fill.");
|
|
320
|
+
const { chainToSendRefundTo, repaymentToken } = getRefundInformationFromFill(
|
|
321
|
+
fill,
|
|
322
|
+
this.clients.hubPoolClient,
|
|
323
|
+
blockRanges,
|
|
324
|
+
this.chainIdListForBundleEvaluationBlockNumbers,
|
|
325
|
+
matchingDeposit!.fromLiteChain // Use ! because we've already asserted that matchingDeposit is defined.
|
|
326
|
+
);
|
|
327
|
+
// Assume that lp fees are 0 for the sake of speed. In the future we could batch compute
|
|
328
|
+
// these or make hardcoded assumptions based on the origin-repayment chain direction. This might result
|
|
329
|
+
// in slight over estimations of refunds, but its not clear whether underestimating or overestimating is
|
|
330
|
+
// worst from the relayer's perspective.
|
|
331
|
+
const { relayer, inputAmount: refundAmount } = fill;
|
|
332
|
+
refundsForChain[chainToSendRefundTo] ??= {};
|
|
333
|
+
refundsForChain[chainToSendRefundTo][repaymentToken] ??= {};
|
|
334
|
+
const existingRefundAmount = refundsForChain[chainToSendRefundTo][repaymentToken][relayer] ?? bnZero;
|
|
335
|
+
refundsForChain[chainToSendRefundTo][repaymentToken][relayer] = existingRefundAmount.add(refundAmount);
|
|
336
|
+
});
|
|
337
|
+
}
|
|
338
|
+
return refundsForChain;
|
|
339
|
+
}
|
|
340
|
+
|
|
341
|
+
getUpcomingDepositAmount(chainId: number, l2Token: string, latestBlockToSearch: number): BigNumber {
|
|
342
|
+
if (this.spokePoolClients[chainId] === undefined) {
|
|
343
|
+
return toBN(0);
|
|
344
|
+
}
|
|
345
|
+
return this.spokePoolClients[chainId]
|
|
346
|
+
.getDeposits()
|
|
347
|
+
.filter((deposit) => deposit.blockNumber > latestBlockToSearch && deposit.inputToken === l2Token)
|
|
348
|
+
.reduce((acc, deposit) => {
|
|
349
|
+
return acc.add(deposit.inputAmount);
|
|
350
|
+
}, toBN(0));
|
|
351
|
+
}
|
|
352
|
+
|
|
353
|
+
private async getLatestProposedBundleData(): Promise<{ bundleData: LoadDataReturnValue; blockRanges: number[][] }> {
|
|
354
|
+
const hubPoolClient = this.clients.hubPoolClient;
|
|
355
|
+
// Determine which bundle we should fetch from arweave, either the pending bundle or the latest
|
|
356
|
+
// executed one. Both should have arweave data but if for some reason the arweave data is missing,
|
|
357
|
+
// this function will have to compute the bundle data from scratch which will be slow. We have to fallback
|
|
358
|
+
// to computing the bundle from scratch since this function needs to return the full bundle data so that
|
|
359
|
+
// it can be used to get the running balance proposed using its data.
|
|
360
|
+
const bundleBlockRanges = getImpliedBundleBlockRanges(
|
|
361
|
+
hubPoolClient,
|
|
362
|
+
this.clients.configStoreClient,
|
|
363
|
+
hubPoolClient.hasPendingProposal()
|
|
364
|
+
? hubPoolClient.getLatestProposedRootBundle()
|
|
365
|
+
: hubPoolClient.getLatestFullyExecutedRootBundle(hubPoolClient.latestBlockSearched)! // ! because we know there is a bundle
|
|
366
|
+
);
|
|
367
|
+
return {
|
|
368
|
+
blockRanges: bundleBlockRanges,
|
|
369
|
+
bundleData: await this.loadData(
|
|
370
|
+
bundleBlockRanges,
|
|
371
|
+
this.spokePoolClients,
|
|
372
|
+
true // this bundle data should have been published to arweave
|
|
373
|
+
),
|
|
374
|
+
};
|
|
375
|
+
}
|
|
376
|
+
|
|
377
|
+
async getLatestPoolRebalanceRoot(): Promise<{ root: PoolRebalanceRoot; blockRanges: number[][] }> {
|
|
378
|
+
const { bundleData, blockRanges } = await this.getLatestProposedBundleData();
|
|
379
|
+
const hubPoolClient = this.clients.hubPoolClient;
|
|
380
|
+
const root = await _buildPoolRebalanceRoot(
|
|
381
|
+
hubPoolClient.latestBlockSearched,
|
|
382
|
+
blockRanges[0][1],
|
|
383
|
+
bundleData.bundleDepositsV3,
|
|
384
|
+
bundleData.bundleFillsV3,
|
|
385
|
+
bundleData.bundleSlowFillsV3,
|
|
386
|
+
bundleData.unexecutableSlowFills,
|
|
387
|
+
bundleData.expiredDepositsToRefundV3,
|
|
388
|
+
{
|
|
389
|
+
hubPoolClient,
|
|
390
|
+
configStoreClient: hubPoolClient.configStoreClient as AcrossConfigStoreClient,
|
|
391
|
+
}
|
|
392
|
+
);
|
|
393
|
+
return {
|
|
394
|
+
root,
|
|
395
|
+
blockRanges,
|
|
396
|
+
};
|
|
397
|
+
}
|
|
398
|
+
|
|
399
|
+
// @dev This function should probably be moved to the InventoryClient since it bypasses loadData completely now.
// Return refunds from the next valid bundle. This will contain any refunds that have been sent but are not included
// in a valid bundle with all of its leaves executed. This contains refunds from:
// - Bundles that passed liveness but have not had all of their pool rebalance leaves executed.
// - Bundles that are pending liveness
// - Fills sent after the pending, but not validated, bundle
async getNextBundleRefunds(): Promise<CombinedRefunds[]> {
  const hubPoolClient = this.clients.hubPoolClient;
  // Start block of the next bundle on mainnet determines which chain ID indices are active.
  const nextBundleMainnetStartBlock = hubPoolClient.getNextBundleStartBlockNumber(
    this.chainIdListForBundleEvaluationBlockNumbers,
    hubPoolClient.latestBlockSearched,
    hubPoolClient.chainId
  );
  const chainIds = this.clients.configStoreClient.getChainIdIndicesForBlock(nextBundleMainnetStartBlock);
  // Accumulates one CombinedRefunds entry per refund source (pending bundle, post-proposal fills).
  const combinedRefunds: CombinedRefunds[] = [];

  // @dev: If spoke pool client is undefined for a chain, then the end block will be null or undefined, which
  // should be handled gracefully and effectively cause this function to ignore refunds for the chain.
  let widestBundleBlockRanges = getWidestPossibleExpectedBlockRange(
    chainIds,
    this.spokePoolClients,
    getEndBlockBuffers(chainIds, this.blockRangeEndBlockBuffer),
    this.clients,
    this.clients.hubPoolClient.latestBlockSearched,
    this.clients.configStoreClient.getEnabledChains(this.clients.hubPoolClient.latestBlockSearched)
  );
  // Return block ranges for blocks after _pendingBlockRanges and up to widestBlockRanges.
  // If a chain is disabled or doesn't have a spoke pool client, return a range of 0.
  // Note: closes over the mutable `widestBundleBlockRanges` above.
  function getBlockRangeDelta(_pendingBlockRanges: number[][]): number[][] {
    return widestBundleBlockRanges.map((blockRange, index) => {
      // If pending block range doesn't have an entry for the widest range, which is possible when a new chain
      // is added to the CHAIN_ID_INDICES list, then simply set the initial block range to the widest block range.
      // This will produce a block range delta of 0 where the returned range for this chain is [widest[1], widest[1]].
      const initialBlockRange = _pendingBlockRanges[index] ?? blockRange;
      // If chain is disabled, return disabled range
      if (initialBlockRange[0] === initialBlockRange[1]) {
        return initialBlockRange;
      }
      // If pending bundle end block exceeds widest end block or if widest end block is undefined
      // (which is possible if the spoke pool client for the chain is not defined), return an empty range since there are no
      // "new" events to consider for this chain.
      if (!isDefined(blockRange[1]) || initialBlockRange[1] >= blockRange[1]) {
        return [initialBlockRange[1], initialBlockRange[1]];
      }
      // If initialBlockRange[0] > widestBlockRange[0], then we'll ignore any blocks
      // between initialBlockRange[0] and widestBlockRange[0] (inclusive) for simplicity reasons. In practice
      // this should not happen.
      return [initialBlockRange[1] + 1, blockRange[1]];
    });
  }

  // If there is a pending bundle that has not been fully executed, then it should have arweave
  // data so we can load it from there.
  if (hubPoolClient.hasPendingProposal()) {
    const pendingBundleBlockRanges = getImpliedBundleBlockRanges(
      hubPoolClient,
      this.clients.configStoreClient,
      hubPoolClient.getLatestProposedRootBundle()
    );
    // Similar to getApproximateRefundsForBlockRange, we'll skip the full bundle reconstruction if the arweave
    // data is undefined and use the much faster approximation method which doesn't consider LP fees which is
    // ok for this use case.
    const arweaveData = await this.loadArweaveData(pendingBundleBlockRanges);
    if (arweaveData === undefined) {
      combinedRefunds.push(this.getApproximateRefundsForBlockRange(chainIds, pendingBundleBlockRanges));
    } else {
      const { bundleFillsV3, expiredDepositsToRefundV3 } = arweaveData;
      combinedRefunds.push(getRefundsFromBundle(bundleFillsV3, expiredDepositsToRefundV3));
    }

    // Shorten the widestBundleBlockRanges now to not double count the pending bundle blocks.
    widestBundleBlockRanges = getBlockRangeDelta(pendingBundleBlockRanges);
  }

  // Next, load all refunds sent after the last bundle proposal. This can be expensive so we'll skip the full
  // bundle reconstruction and make some simplifying assumptions:
  // - Only look up fills sent after the pending bundle's end blocks
  // - Skip LP fee computations and just assume the relayer is being refunded the full deposit.inputAmount
  const start = performance.now();
  combinedRefunds.push(this.getApproximateRefundsForBlockRange(chainIds, widestBundleBlockRanges));
  this.logger.debug({
    at: "BundleDataClient#getNextBundleRefunds",
    message: `Loading approximate refunds for next bundle in ${Math.round(performance.now() - start) / 1000}s.`,
    blockRanges: JSON.stringify(widestBundleBlockRanges),
  });
  return combinedRefunds;
}
|
|
486
|
+
|
|
487
|
+
// @dev This helper function should probably be moved to the InventoryClient
/**
 * Aggregates executed relayer refund amounts for the root bundle relayed to
 * `spokePoolClient`'s chain whose relayer refund root matches `relayerRefundRoot`.
 * @param spokePoolClient Client whose executed relayer refund leaves are searched.
 * @param relayerRefundRoot Relayer refund merkle root identifying the bundle of interest.
 * @returns Mapping of l2 token address => relayer address => total executed refund amount.
 *          Empty object when the client is undefined or no matching bundle is found.
 */
getExecutedRefunds(
  spokePoolClient: SpokePoolClient,
  relayerRefundRoot: string
): {
  [tokenAddress: string]: {
    [relayer: string]: BigNumber;
  };
} {
  if (!isDefined(spokePoolClient)) {
    return {};
  }
  // @dev Search from right to left since there can be multiple root bundles with the same relayer refund root.
  // The caller should take caution if they're trying to use this function to find matching refunds for older
  // root bundles as opposed to more recent ones.
  // Callback parameter renamed so it no longer shadows the `bundle` const being declared.
  const bundle = _.findLast(
    spokePoolClient.getRootBundleRelays(),
    (relayedBundle) => relayedBundle.relayerRefundRoot === relayerRefundRoot
  );
  if (bundle === undefined) {
    return {};
  }

  const executedRefundLeaves = spokePoolClient
    .getRelayerRefundExecutions()
    .filter((leaf) => leaf.rootBundleId === bundle.rootBundleId);
  const executedRefunds: { [tokenAddress: string]: { [relayer: string]: BigNumber } } = {};
  for (const refundLeaf of executedRefundLeaves) {
    const tokenAddress = refundLeaf.l2TokenAddress;
    // ??= keeps nested-dictionary initialization consistent with the rest of this class.
    const executedTokenRefunds = (executedRefunds[tokenAddress] ??= {});

    // refundAddresses and refundAmounts are parallel arrays within a leaf.
    for (let i = 0; i < refundLeaf.refundAddresses.length; i++) {
      const relayer = refundLeaf.refundAddresses[i];
      const refundAmount = refundLeaf.refundAmounts[i];
      executedTokenRefunds[relayer] = (executedTokenRefunds[relayer] ?? bnZero).add(refundAmount);
    }
  }
  return executedRefunds;
}
|
|
532
|
+
|
|
533
|
+
// @dev This helper function should probably be moved to the InventoryClient
|
|
534
|
+
private deductExecutedRefunds(
|
|
535
|
+
allRefunds: CombinedRefunds,
|
|
536
|
+
bundleContainingRefunds: ProposedRootBundle
|
|
537
|
+
): CombinedRefunds {
|
|
538
|
+
for (const chainIdStr of Object.keys(allRefunds)) {
|
|
539
|
+
const chainId = Number(chainIdStr);
|
|
540
|
+
if (!isDefined(this.spokePoolClients[chainId])) {
|
|
541
|
+
continue;
|
|
542
|
+
}
|
|
543
|
+
const executedRefunds = this.getExecutedRefunds(
|
|
544
|
+
this.spokePoolClients[chainId],
|
|
545
|
+
bundleContainingRefunds.relayerRefundRoot
|
|
546
|
+
);
|
|
547
|
+
|
|
548
|
+
for (const tokenAddress of Object.keys(allRefunds[chainId])) {
|
|
549
|
+
const refunds = allRefunds[chainId][tokenAddress];
|
|
550
|
+
if (executedRefunds[tokenAddress] === undefined || refunds === undefined) {
|
|
551
|
+
continue;
|
|
552
|
+
}
|
|
553
|
+
|
|
554
|
+
for (const relayer of Object.keys(refunds)) {
|
|
555
|
+
const executedAmount = executedRefunds[tokenAddress][relayer];
|
|
556
|
+
if (executedAmount === undefined) {
|
|
557
|
+
continue;
|
|
558
|
+
}
|
|
559
|
+
// Since there should only be a single executed relayer refund leaf for each relayer-token-chain combination,
|
|
560
|
+
// we can deduct this refund and mark it as executed if the executed amount is > 0.
|
|
561
|
+
refunds[relayer] = bnZero;
|
|
562
|
+
}
|
|
563
|
+
}
|
|
564
|
+
}
|
|
565
|
+
return allRefunds;
|
|
566
|
+
}
|
|
567
|
+
|
|
568
|
+
/**
 * Looks up the refund owed to `relayer` on `chainId` denominated in `token` within a single
 * bundle's refunds.
 * @returns The refund amount, or zero when no matching chain/token/relayer entry exists.
 */
getRefundsFor(bundleRefunds: CombinedRefunds, relayer: string, chainId: number, token: string): BigNumber {
  // Optional chaining replaces the nested truthiness checks, and bnZero replaces
  // BigNumber.from(0) for consistency with the other refund helpers in this class.
  return bundleRefunds[chainId]?.[token]?.[relayer] ?? bnZero;
}
|
|
575
|
+
|
|
576
|
+
/**
 * Sums the refund owed to `relayer` on `chainId` in `refundToken` across all provided bundles.
 * @param refunds One CombinedRefunds entry per bundle to aggregate over.
 * @returns Total refund amount, bnZero when no bundle contains a matching entry.
 */
getTotalRefund(refunds: CombinedRefunds[], relayer: string, chainId: number, refundToken: string): BigNumber {
  // Accumulator callback parameter renamed: it previously shadowed the outer `refunds` array.
  return refunds.reduce(
    (totalRefund, bundleRefunds) => totalRefund.add(this.getRefundsFor(bundleRefunds, relayer, chainId, refundToken)),
    bnZero
  );
}
|
|
581
|
+
|
|
582
|
+
// Loads (and memoizes per block-range key) the bundle data persisted to Arweave for the given
// block ranges. The cache stores the in-flight promise so concurrent callers share one fetch,
// and cloneDeep protects the cached value from mutation by callers.
// NOTE(review): the trailing non-null assertion hides that the awaited value may be undefined —
// callers such as getNextBundleRefunds explicitly check `arweaveData === undefined`, so the
// declared return type appears narrower than the runtime behavior. TODO confirm and consider
// widening the return type to Promise<LoadDataReturnValue | undefined>.
private async loadArweaveData(blockRangesForChains: number[][]): Promise<LoadDataReturnValue> {
  const arweaveKey = this.getArweaveClientKey(blockRangesForChains);
  // eslint-disable-next-line @typescript-eslint/no-misused-promises
  if (!this.arweaveDataCache[arweaveKey]) {
    this.arweaveDataCache[arweaveKey] = this.loadPersistedDataFromArweave(blockRangesForChains);
  }
  const arweaveData = _.cloneDeep(await this.arweaveDataCache[arweaveKey]);
  return arweaveData!;
}
|
|
591
|
+
|
|
592
|
+
// Common data re-formatting logic shared across all data worker public functions.
|
|
593
|
+
// User must pass in spoke pool to search event data against. This allows the user to refund relays and fill deposits
|
|
594
|
+
// on deprecated spoke pools.
|
|
595
|
+
async loadData(
|
|
596
|
+
blockRangesForChains: number[][],
|
|
597
|
+
spokePoolClients: SpokePoolClientsByChain,
|
|
598
|
+
attemptArweaveLoad = false
|
|
599
|
+
): Promise<LoadDataReturnValue> {
|
|
600
|
+
const key = JSON.stringify(blockRangesForChains);
|
|
601
|
+
// eslint-disable-next-line @typescript-eslint/no-misused-promises
|
|
602
|
+
if (!this.loadDataCache[key]) {
|
|
603
|
+
let arweaveData;
|
|
604
|
+
if (attemptArweaveLoad) {
|
|
605
|
+
arweaveData = await this.loadArweaveData(blockRangesForChains);
|
|
606
|
+
} else {
|
|
607
|
+
arweaveData = undefined;
|
|
608
|
+
}
|
|
609
|
+
const data = isDefined(arweaveData)
|
|
610
|
+
? // We can return the data to a Promise to keep the return type consistent.
|
|
611
|
+
// Note: this is now a fast operation since we've already loaded the data from Arweave.
|
|
612
|
+
Promise.resolve(arweaveData)
|
|
613
|
+
: this.loadDataFromScratch(blockRangesForChains, spokePoolClients);
|
|
614
|
+
this.loadDataCache[key] = data;
|
|
615
|
+
}
|
|
616
|
+
|
|
617
|
+
return this.loadDataFromCache(key);
|
|
618
|
+
}
|
|
619
|
+
|
|
620
|
+
private async loadDataFromScratch(
|
|
621
|
+
blockRangesForChains: number[][],
|
|
622
|
+
spokePoolClients: SpokePoolClientsByChain
|
|
623
|
+
): Promise<LoadDataReturnValue> {
|
|
624
|
+
let start = performance.now();
|
|
625
|
+
const key = JSON.stringify(blockRangesForChains);
|
|
626
|
+
|
|
627
|
+
if (!this.clients.configStoreClient.isUpdated) {
|
|
628
|
+
throw new Error("ConfigStoreClient not updated");
|
|
629
|
+
} else if (!this.clients.hubPoolClient.isUpdated) {
|
|
630
|
+
throw new Error("HubPoolClient not updated");
|
|
631
|
+
}
|
|
632
|
+
|
|
633
|
+
const chainIds = this.clients.configStoreClient.getChainIdIndicesForBlock(blockRangesForChains[0][0]);
|
|
634
|
+
|
|
635
|
+
if (blockRangesForChains.length > chainIds.length) {
|
|
636
|
+
throw new Error(
|
|
637
|
+
`Unexpected block range list length of ${blockRangesForChains.length}, should be <= ${chainIds.length}`
|
|
638
|
+
);
|
|
639
|
+
}
|
|
640
|
+
|
|
641
|
+
// V3 specific objects:
|
|
642
|
+
const bundleDepositsV3: BundleDepositsV3 = {}; // Deposits in bundle block range.
|
|
643
|
+
const bundleFillsV3: BundleFillsV3 = {}; // Fills to refund in bundle block range.
|
|
644
|
+
const bundleInvalidFillsV3: V3FillWithBlock[] = []; // Fills that are not valid in this bundle.
|
|
645
|
+
const bundleSlowFillsV3: BundleSlowFills = {}; // Deposits that we need to send slow fills
|
|
646
|
+
// for in this bundle.
|
|
647
|
+
const expiredDepositsToRefundV3: ExpiredDepositsToRefundV3 = {};
|
|
648
|
+
// Newly expired deposits in this bundle that need to be refunded.
|
|
649
|
+
const unexecutableSlowFills: BundleExcessSlowFills = {};
|
|
650
|
+
// Deposit data for all Slowfills that was included in a previous
|
|
651
|
+
// bundle and can no longer be executed because (1) they were replaced with a FastFill in this bundle or
|
|
652
|
+
// (2) the fill deadline has passed. We'll need to decrement running balances for these deposits on the
|
|
653
|
+
// destination chain where the slow fill would have been executed.
|
|
654
|
+
|
|
655
|
+
const _isChainDisabled = (chainId: number): boolean => {
|
|
656
|
+
const blockRangeForChain = getBlockRangeForChain(blockRangesForChains, chainId, chainIds);
|
|
657
|
+
return isChainDisabled(blockRangeForChain);
|
|
658
|
+
};
|
|
659
|
+
|
|
660
|
+
// Infer chain ID's to load from number of block ranges passed in.
|
|
661
|
+
const allChainIds = blockRangesForChains
|
|
662
|
+
.map((_blockRange, index) => chainIds[index])
|
|
663
|
+
.filter((chainId) => !_isChainDisabled(chainId) && spokePoolClients[chainId] !== undefined);
|
|
664
|
+
allChainIds.forEach((chainId) => {
|
|
665
|
+
const spokePoolClient = spokePoolClients[chainId];
|
|
666
|
+
if (!spokePoolClient.isUpdated) {
|
|
667
|
+
throw new Error(`SpokePoolClient for chain ${chainId} not updated.`);
|
|
668
|
+
}
|
|
669
|
+
});
|
|
670
|
+
|
|
671
|
+
// If spoke pools are V3 contracts, then we need to compute start and end timestamps for block ranges to
|
|
672
|
+
// determine whether fillDeadlines have expired.
|
|
673
|
+
// @dev Going to leave this in so we can see impact on run-time in prod. This makes (allChainIds.length * 2) RPC
|
|
674
|
+
// calls in parallel.
|
|
675
|
+
const _cachedBundleTimestamps = this.getBundleTimestampsFromCache(key);
|
|
676
|
+
let bundleBlockTimestamps: { [chainId: string]: number[] } = {};
|
|
677
|
+
if (!_cachedBundleTimestamps) {
|
|
678
|
+
bundleBlockTimestamps = await this.getBundleBlockTimestamps(chainIds, blockRangesForChains, spokePoolClients);
|
|
679
|
+
this.setBundleTimestampsInCache(key, bundleBlockTimestamps);
|
|
680
|
+
this.logger.debug({
|
|
681
|
+
at: "BundleDataClient#loadData",
|
|
682
|
+
message: "Bundle block timestamps",
|
|
683
|
+
bundleBlockTimestamps,
|
|
684
|
+
blockRangesForChains: JSON.stringify(blockRangesForChains),
|
|
685
|
+
});
|
|
686
|
+
} else {
|
|
687
|
+
bundleBlockTimestamps = _cachedBundleTimestamps;
|
|
688
|
+
}
|
|
689
|
+
|
|
690
|
+
/** *****************************
|
|
691
|
+
*
|
|
692
|
+
* Handle V3 events
|
|
693
|
+
*
|
|
694
|
+
* *****************************/
|
|
695
|
+
|
|
696
|
+
// The methodology here is roughly as follows
|
|
697
|
+
// - Query all deposits from SpokePoolClients
|
|
698
|
+
// - If deposit is in origin chain block range, add it to bundleDepositsV3
|
|
699
|
+
// - If deposit is expired or from an older bundle, stash it away as a deposit that may require an expired
|
|
700
|
+
// deposit refund.
|
|
701
|
+
// - Query fills from SpokePoolClients
|
|
702
|
+
// - If fill is in destination chain block range, then validate fill
|
|
703
|
+
// - Fill is valid if its RelayData hash is identical to a deposit's relay data hash that we've already seen.
|
|
704
|
+
// If we haven't seen a deposit with a matching hash, then we need to query for an older deposit earlier than
|
|
705
|
+
// the SpokePoolClient's lookback window via queryHistoricalDepositForFill().
|
|
706
|
+
// - If fill is valid, then add it to bundleFillsV3. If it's a slow fill execution, we won't
|
|
707
|
+
// add a relayer refund for it, but all fills accumulate realized LP fees.
|
|
708
|
+
// - If fill replaced a slow fill request, then stash it away as one that potentially created an
|
|
709
|
+
// unexecutable slow fill.
|
|
710
|
+
// - Query slow fills from SpokePoolClients
|
|
711
|
+
// - If slow fill is in destination chain block range, then validate slow fill
|
|
712
|
+
// - Slow fill is valid if its RelayData hash is identical to a deposit's relay data hash that we've already seen,
|
|
713
|
+
// and it does not match with a Fill that we've seen, and its input and output tokens are equivalent,
|
|
714
|
+
// and the deposit that is being slow filled has not expired.
|
|
715
|
+
// - Note that if we haven't can't match the slow fill with a deposit, then we need to query for an older
|
|
716
|
+
// deposit earlier than the SpokePoolClient's lookback window via queryHistoricalDepositForFill().
|
|
717
|
+
// - input and output tokens are considered equivalent if they map to the same L1 token via a PoolRebalanceRoute
|
|
718
|
+
// at the deposit.quoteBlockNumber.
|
|
719
|
+
// - To validate fills that replaced slow fills, we should check that there is no slow fill request in the
|
|
720
|
+
// current destination chain bundle block range with a matching relay hash. Additionally, the
|
|
721
|
+
// fast fill replacing a slow fill must have filled a slow-fill eligible deposit meaning that
|
|
722
|
+
// its input and output tokens are equivalent. We don't need to check that the slow fill was created
|
|
723
|
+
// before the deposit expired by definition because the deposit was fast-filled, meaning that it did not
|
|
724
|
+
// expire.
|
|
725
|
+
// - To validate deposits in the current bundle block range that expired newly in this destination
|
|
726
|
+
// chain's current bundle block range, we only have to check that the deposit was not filled in the current
|
|
727
|
+
// destination chain block range.
|
|
728
|
+
// - To validate deposits from a prior bundle that expired newly, we need to make sure that the deposit
|
|
729
|
+
// was not filled. If we can't find a fill, then we should check its FillStatus on-chain via eth_call.
|
|
730
|
+
// This will return either Unfilled, RequestedSlowFill, or Filled. If the deposit is Filled, then
|
|
731
|
+
// then the fill happened a long time ago and we should do nothing. If the deposit is Unfilled, then
|
|
732
|
+
// we should refund it as an expired deposit. If the deposit is RequestedSlowFill then we need to validate
|
|
733
|
+
// that the deposit is eligible for a slow fill (its input and output tokens are equivalent) and that
|
|
734
|
+
// the slow fill request was not sent in the current destination chain's bundle block range.
|
|
735
|
+
|
|
736
|
+
// Using the above rules, we will create a list of:
|
|
737
|
+
// - deposits in the current bundle
|
|
738
|
+
// - fast fills to refund in the current bundle
|
|
739
|
+
// - fills creating bundle LP fees in the current bundle
|
|
740
|
+
// - slow fills to create for the current bundle
|
|
741
|
+
// - deposits that expired in the current bundle
|
|
742
|
+
|
|
743
|
+
// Use this dictionary to conveniently unite all events with the same relay data hash which will make
|
|
744
|
+
// secondary lookups faster. The goal is to lazily fill up this dictionary with all events in the SpokePool
|
|
745
|
+
// client's in-memory event cache.
|
|
746
|
+
const v3RelayHashes: {
|
|
747
|
+
[relayHash: string]: {
|
|
748
|
+
// Note: Since there are no partial fills in v3, there should only be one fill per relay hash.
|
|
749
|
+
// There should also only be one deposit per relay hash since deposit ID's can't be re-used on the
|
|
750
|
+
// same spoke pool. Moreover, the SpokePool blocks multiple slow fill requests, so
|
|
751
|
+
// there should also only be one slow fill request per relay hash.
|
|
752
|
+
deposit?: V3DepositWithBlock;
|
|
753
|
+
fill?: V3FillWithBlock;
|
|
754
|
+
slowFillRequest?: SlowFillRequestWithBlock;
|
|
755
|
+
};
|
|
756
|
+
} = {};
|
|
757
|
+
|
|
758
|
+
// Process all deposits first and keep track of deposits that may be refunded as an expired deposit:
|
|
759
|
+
// - expiredBundleDepositHashes: Deposits sent in this bundle that expired.
|
|
760
|
+
const expiredBundleDepositHashes: Set<string> = new Set<string>();
|
|
761
|
+
// - olderDepositHashes: Deposits sent in a prior bundle that newly expired in this bundle
|
|
762
|
+
const olderDepositHashes: Set<string> = new Set<string>();
|
|
763
|
+
|
|
764
|
+
let depositCounter = 0;
|
|
765
|
+
for (const originChainId of allChainIds) {
|
|
766
|
+
const originClient = spokePoolClients[originChainId];
|
|
767
|
+
const originChainBlockRange = getBlockRangeForChain(blockRangesForChains, originChainId, chainIds);
|
|
768
|
+
|
|
769
|
+
for (const destinationChainId of allChainIds) {
|
|
770
|
+
if (originChainId === destinationChainId) {
|
|
771
|
+
continue;
|
|
772
|
+
}
|
|
773
|
+
originClient.getDepositsForDestinationChain(destinationChainId).forEach((deposit) => {
|
|
774
|
+
depositCounter++;
|
|
775
|
+
const relayDataHash = this.getRelayHashFromEvent(deposit);
|
|
776
|
+
if (v3RelayHashes[relayDataHash]) {
|
|
777
|
+
// If we've seen this deposit before, then skip this deposit. This can happen if our RPC provider
|
|
778
|
+
// gives us bad data.
|
|
779
|
+
return;
|
|
780
|
+
}
|
|
781
|
+
// Even if deposit is not in bundle block range, store all deposits we can see in memory in this
|
|
782
|
+
// convenient dictionary.
|
|
783
|
+
v3RelayHashes[relayDataHash] = {
|
|
784
|
+
deposit: deposit,
|
|
785
|
+
fill: undefined,
|
|
786
|
+
slowFillRequest: undefined,
|
|
787
|
+
};
|
|
788
|
+
|
|
789
|
+
// If deposit block is within origin chain bundle block range, then save as bundle deposit.
|
|
790
|
+
// If deposit is in bundle and it has expired, additionally save it as an expired deposit.
|
|
791
|
+
// If deposit is not in the bundle block range, then save it as an older deposit that
|
|
792
|
+
// may have expired.
|
|
793
|
+
if (deposit.blockNumber >= originChainBlockRange[0] && deposit.blockNumber <= originChainBlockRange[1]) {
|
|
794
|
+
// Deposit is a V3 deposit in this origin chain's bundle block range and is not a duplicate.
|
|
795
|
+
updateBundleDepositsV3(bundleDepositsV3, deposit);
|
|
796
|
+
// We don't check that fillDeadline >= bundleBlockTimestamps[destinationChainId][0] because
|
|
797
|
+
// that would eliminate any deposits in this bundle with a very low fillDeadline like equal to 0
|
|
798
|
+
// for example. Those should be impossible to create but technically should be included in this
|
|
799
|
+
// bundle of refunded deposits.
|
|
800
|
+
if (deposit.fillDeadline < bundleBlockTimestamps[destinationChainId][1]) {
|
|
801
|
+
expiredBundleDepositHashes.add(relayDataHash);
|
|
802
|
+
}
|
|
803
|
+
} else if (deposit.blockNumber < originChainBlockRange[0]) {
|
|
804
|
+
olderDepositHashes.add(relayDataHash);
|
|
805
|
+
}
|
|
806
|
+
});
|
|
807
|
+
}
|
|
808
|
+
}
|
|
809
|
+
this.logger.debug({
|
|
810
|
+
at: "BundleDataClient#loadData",
|
|
811
|
+
message: `Processed ${depositCounter} deposits in ${performance.now() - start}ms.`,
|
|
812
|
+
});
|
|
813
|
+
start = performance.now();
|
|
814
|
+
|
|
815
|
+
// Process fills now that we've populated relay hash dictionary with deposits:
|
|
816
|
+
const validatedBundleV3Fills: (V3FillWithBlock & { quoteTimestamp: number })[] = [];
|
|
817
|
+
const validatedBundleSlowFills: V3DepositWithBlock[] = [];
|
|
818
|
+
const validatedBundleUnexecutableSlowFills: V3DepositWithBlock[] = [];
|
|
819
|
+
let fillCounter = 0;
|
|
820
|
+
for (const originChainId of allChainIds) {
|
|
821
|
+
const originClient = spokePoolClients[originChainId];
|
|
822
|
+
for (const destinationChainId of allChainIds) {
|
|
823
|
+
if (originChainId === destinationChainId) {
|
|
824
|
+
continue;
|
|
825
|
+
}
|
|
826
|
+
|
|
827
|
+
const destinationClient = spokePoolClients[destinationChainId];
|
|
828
|
+
const destinationChainBlockRange = getBlockRangeForChain(blockRangesForChains, destinationChainId, chainIds);
|
|
829
|
+
|
|
830
|
+
// Keep track of fast fills that replaced slow fills, which we'll use to create "unexecutable" slow fills
|
|
831
|
+
// if the slow fill request was sent in a prior bundle.
|
|
832
|
+
const fastFillsReplacingSlowFills: string[] = [];
|
|
833
|
+
await forEachAsync(
|
|
834
|
+
destinationClient
|
|
835
|
+
.getFillsForOriginChain(originChainId)
|
|
836
|
+
.filter((fill) => fill.blockNumber <= destinationChainBlockRange[1]),
|
|
837
|
+
async (fill) => {
|
|
838
|
+
const relayDataHash = this.getRelayHashFromEvent(fill);
|
|
839
|
+
fillCounter++;
|
|
840
|
+
|
|
841
|
+
if (v3RelayHashes[relayDataHash]) {
|
|
842
|
+
if (!v3RelayHashes[relayDataHash].fill) {
|
|
843
|
+
assert(
|
|
844
|
+
isDefined(v3RelayHashes[relayDataHash].deposit),
|
|
845
|
+
"Deposit should exist in relay hash dictionary."
|
|
846
|
+
);
|
|
847
|
+
// At this point, the v3RelayHashes entry already existed meaning that there is a matching deposit,
|
|
848
|
+
// so this fill is validated.
|
|
849
|
+
v3RelayHashes[relayDataHash].fill = fill;
|
|
850
|
+
if (fill.blockNumber >= destinationChainBlockRange[0]) {
|
|
851
|
+
validatedBundleV3Fills.push({
|
|
852
|
+
...fill,
|
|
853
|
+
quoteTimestamp: v3RelayHashes[relayDataHash].deposit!.quoteTimestamp, // ! due to assert above
|
|
854
|
+
});
|
|
855
|
+
// If fill replaced a slow fill request, then mark it as one that might have created an
|
|
856
|
+
// unexecutable slow fill. We can't know for sure until we check the slow fill request
|
|
857
|
+
// events.
|
|
858
|
+
if (fill.relayExecutionInfo.fillType === FillType.ReplacedSlowFill) {
|
|
859
|
+
fastFillsReplacingSlowFills.push(relayDataHash);
|
|
860
|
+
}
|
|
861
|
+
}
|
|
862
|
+
}
|
|
863
|
+
return;
|
|
864
|
+
}
|
|
865
|
+
|
|
866
|
+
// At this point, there is no relay hash dictionary entry for this fill, so we need to
|
|
867
|
+
// instantiate the entry.
|
|
868
|
+
v3RelayHashes[relayDataHash] = {
|
|
869
|
+
deposit: undefined,
|
|
870
|
+
fill: fill,
|
|
871
|
+
slowFillRequest: undefined,
|
|
872
|
+
};
|
|
873
|
+
|
|
874
|
+
// TODO: We might be able to remove the following historical query once we deprecate the deposit()
|
|
875
|
+
// function since there won't be any old, unexpired deposits anymore assuming the spoke pool client
|
|
876
|
+
// lookbacks have been validated, which they should be before we run this function.
|
|
877
|
+
|
|
878
|
+
// Since there was no deposit matching the relay hash, we need to do a historical query for an
|
|
879
|
+
// older deposit in case the spoke pool client's lookback isn't old enough to find the matching deposit.
|
|
880
|
+
// We can skip this step if the fill's fill deadline is not infinite, because we can assume that the
|
|
881
|
+
// spoke pool clients have loaded deposits old enough to cover all fills with a non-infinite fill deadline.
|
|
882
|
+
if (fill.blockNumber >= destinationChainBlockRange[0]) {
|
|
883
|
+
// Fill has a non-infinite expiry, and we can assume our spoke pool clients have old enough deposits
|
|
884
|
+
// to conclude that this fill is invalid if we haven't found a matching deposit in memory, so
|
|
885
|
+
// skip the historical query.
|
|
886
|
+
if (!INFINITE_FILL_DEADLINE.eq(fill.fillDeadline)) {
|
|
887
|
+
bundleInvalidFillsV3.push(fill);
|
|
888
|
+
return;
|
|
889
|
+
}
|
|
890
|
+
const historicalDeposit = await queryHistoricalDepositForFill(originClient, fill);
|
|
891
|
+
if (!historicalDeposit.found) {
|
|
892
|
+
bundleInvalidFillsV3.push(fill);
|
|
893
|
+
} else {
|
|
894
|
+
const matchedDeposit = historicalDeposit.deposit;
|
|
895
|
+
// @dev Since queryHistoricalDepositForFill validates the fill by checking individual
|
|
896
|
+
// object property values against the deposit's, we
|
|
897
|
+
// sanity check it here by comparing the full relay hashes. If there's an error here then the
|
|
898
|
+
// historical deposit query is not working as expected.
|
|
899
|
+
assert(this.getRelayHashFromEvent(matchedDeposit) === relayDataHash, "Relay hashes should match.");
|
|
900
|
+
validatedBundleV3Fills.push({
|
|
901
|
+
...fill,
|
|
902
|
+
quoteTimestamp: matchedDeposit.quoteTimestamp,
|
|
903
|
+
});
|
|
904
|
+
v3RelayHashes[relayDataHash].deposit = matchedDeposit;
|
|
905
|
+
if (fill.relayExecutionInfo.fillType === FillType.ReplacedSlowFill) {
|
|
906
|
+
fastFillsReplacingSlowFills.push(relayDataHash);
|
|
907
|
+
}
|
|
908
|
+
}
|
|
909
|
+
}
|
|
910
|
+
}
|
|
911
|
+
);
|
|
912
|
+
|
|
913
|
+
await forEachAsync(
|
|
914
|
+
destinationClient
|
|
915
|
+
.getSlowFillRequestsForOriginChain(originChainId)
|
|
916
|
+
.filter((request) => request.blockNumber <= destinationChainBlockRange[1]),
|
|
917
|
+
async (slowFillRequest: SlowFillRequestWithBlock) => {
|
|
918
|
+
const relayDataHash = this.getRelayHashFromEvent(slowFillRequest);
|
|
919
|
+
|
|
920
|
+
if (v3RelayHashes[relayDataHash]) {
|
|
921
|
+
if (!v3RelayHashes[relayDataHash].slowFillRequest) {
|
|
922
|
+
// At this point, the v3RelayHashes entry already existed meaning that there is either a matching
|
|
923
|
+
// fill or deposit.
|
|
924
|
+
v3RelayHashes[relayDataHash].slowFillRequest = slowFillRequest;
|
|
925
|
+
if (v3RelayHashes[relayDataHash].fill) {
|
|
926
|
+
// If there is a fill matching the relay hash, then this slow fill request can't be used
|
|
927
|
+
// to create a slow fill for a filled deposit.
|
|
928
|
+
return;
|
|
929
|
+
}
|
|
930
|
+
assert(
|
|
931
|
+
isDefined(v3RelayHashes[relayDataHash].deposit),
|
|
932
|
+
"Deposit should exist in relay hash dictionary."
|
|
933
|
+
);
|
|
934
|
+
// The ! is safe here because we've already checked that the deposit exists in the relay hash dictionary.
|
|
935
|
+
const matchedDeposit = v3RelayHashes[relayDataHash].deposit!;
|
|
936
|
+
|
|
937
|
+
// Input and Output tokens must be equivalent on the deposit for this to be slow filled.
|
|
938
|
+
if (
|
|
939
|
+
!this.clients.hubPoolClient.areTokensEquivalent(
|
|
940
|
+
matchedDeposit.inputToken,
|
|
941
|
+
matchedDeposit.originChainId,
|
|
942
|
+
matchedDeposit.outputToken,
|
|
943
|
+
matchedDeposit.destinationChainId,
|
|
944
|
+
matchedDeposit.quoteBlockNumber
|
|
945
|
+
)
|
|
946
|
+
) {
|
|
947
|
+
return;
|
|
948
|
+
}
|
|
949
|
+
|
|
950
|
+
// slow fill requests for deposits from or to lite chains are considered invalid
|
|
951
|
+
if (
|
|
952
|
+
v3RelayHashes[relayDataHash].deposit?.fromLiteChain ||
|
|
953
|
+
v3RelayHashes[relayDataHash].deposit?.toLiteChain
|
|
954
|
+
) {
|
|
955
|
+
return;
|
|
956
|
+
}
|
|
957
|
+
|
|
958
|
+
// If there is no fill matching the relay hash, then this might be a valid slow fill request
|
|
959
|
+
// that we should produce a slow fill leaf for. Check if the slow fill request is in the
|
|
960
|
+
// destination chain block range and that the underlying deposit has not expired yet.
|
|
961
|
+
if (
|
|
962
|
+
slowFillRequest.blockNumber >= destinationChainBlockRange[0] &&
|
|
963
|
+
// Deposit must not have expired in this bundle.
|
|
964
|
+
slowFillRequest.fillDeadline >= bundleBlockTimestamps[destinationChainId][1]
|
|
965
|
+
) {
|
|
966
|
+
// At this point, the v3RelayHashes entry already existed meaning that there is a matching deposit,
|
|
967
|
+
// so this slow fill request relay data is correct.
|
|
968
|
+
validatedBundleSlowFills.push(matchedDeposit);
|
|
969
|
+
}
|
|
970
|
+
}
|
|
971
|
+
return;
|
|
972
|
+
}
|
|
973
|
+
|
|
974
|
+
// Instantiate dictionary if there is neither a deposit nor fill matching it.
|
|
975
|
+
v3RelayHashes[relayDataHash] = {
|
|
976
|
+
deposit: undefined,
|
|
977
|
+
fill: undefined,
|
|
978
|
+
slowFillRequest: slowFillRequest,
|
|
979
|
+
};
|
|
980
|
+
|
|
981
|
+
// TODO: We might be able to remove the following historical query once we deprecate the deposit()
|
|
982
|
+
// function since there won't be any old, unexpired deposits anymore assuming the spoke pool client
|
|
983
|
+
// lookbacks have been validated, which they should be before we run this function.
|
|
984
|
+
|
|
985
|
+
// Since there was no deposit matching the relay hash, we need to do a historical query for an
|
|
986
|
+
// older deposit in case the spoke pool client's lookback isn't old enough to find the matching deposit.
|
|
987
|
+
// We can skip this step if the deposit's fill deadline is not infinite, because we can assume that the
|
|
988
|
+
// spoke pool clients have loaded deposits old enough to cover all fills with a non-infinite fill deadline.
|
|
989
|
+
if (
|
|
990
|
+
INFINITE_FILL_DEADLINE.eq(slowFillRequest.fillDeadline) &&
|
|
991
|
+
slowFillRequest.blockNumber >= destinationChainBlockRange[0]
|
|
992
|
+
) {
|
|
993
|
+
const historicalDeposit = await queryHistoricalDepositForFill(originClient, slowFillRequest);
|
|
994
|
+
if (!historicalDeposit.found) {
|
|
995
|
+
// TODO: Invalid slow fill request. Maybe worth logging.
|
|
996
|
+
return;
|
|
997
|
+
}
|
|
998
|
+
const matchedDeposit: V3DepositWithBlock = historicalDeposit.deposit;
|
|
999
|
+
// @dev Since queryHistoricalDepositForFill validates the slow fill request by checking individual
|
|
1000
|
+
// object property values against the deposit's, we
|
|
1001
|
+
// sanity check it here by comparing the full relay hashes. If there's an error here then the
|
|
1002
|
+
// historical deposit query is not working as expected.
|
|
1003
|
+
assert(
|
|
1004
|
+
this.getRelayHashFromEvent(matchedDeposit) === relayDataHash,
|
|
1005
|
+
"Deposit relay hashes should match."
|
|
1006
|
+
);
|
|
1007
|
+
|
|
1008
|
+
// slow fill requests for deposits from or to lite chains are considered invalid
|
|
1009
|
+
if (matchedDeposit.fromLiteChain || matchedDeposit.toLiteChain) {
|
|
1010
|
+
return;
|
|
1011
|
+
}
|
|
1012
|
+
|
|
1013
|
+
v3RelayHashes[relayDataHash].deposit = matchedDeposit;
|
|
1014
|
+
|
|
1015
|
+
// Note: we don't need to query for a historical fill at this point because a fill
|
|
1016
|
+
// cannot precede a slow fill request and if the fill came after the slow fill request,
|
|
1017
|
+
// we would have seen it already because we would have processed it in the loop above.
|
|
1018
|
+
if (
|
|
1019
|
+
// Input and Output tokens must be equivalent on the deposit for this to be slow filled.
|
|
1020
|
+
!this.clients.hubPoolClient.areTokensEquivalent(
|
|
1021
|
+
matchedDeposit.inputToken,
|
|
1022
|
+
matchedDeposit.originChainId,
|
|
1023
|
+
matchedDeposit.outputToken,
|
|
1024
|
+
matchedDeposit.destinationChainId,
|
|
1025
|
+
matchedDeposit.quoteBlockNumber
|
|
1026
|
+
) ||
|
|
1027
|
+
// Deposit must not have expired in this bundle.
|
|
1028
|
+
slowFillRequest.fillDeadline < bundleBlockTimestamps[destinationChainId][1]
|
|
1029
|
+
) {
|
|
1030
|
+
// TODO: Invalid slow fill request. Maybe worth logging.
|
|
1031
|
+
return;
|
|
1032
|
+
}
|
|
1033
|
+
validatedBundleSlowFills.push(matchedDeposit);
|
|
1034
|
+
}
|
|
1035
|
+
}
|
|
1036
|
+
);
|
|
1037
|
+
|
|
1038
|
+
// For all fills that came after a slow fill request, we can now check if the slow fill request
|
|
1039
|
+
// was a valid one and whether it was created in a previous bundle. If so, then it created a slow fill
|
|
1040
|
+
// leaf that is now unexecutable.
|
|
1041
|
+
fastFillsReplacingSlowFills.forEach((relayDataHash) => {
|
|
1042
|
+
const { deposit, slowFillRequest, fill } = v3RelayHashes[relayDataHash];
|
|
1043
|
+
assert(
|
|
1044
|
+
fill?.relayExecutionInfo.fillType === FillType.ReplacedSlowFill,
|
|
1045
|
+
"Fill type should be ReplacedSlowFill."
|
|
1046
|
+
);
|
|
1047
|
+
// Needed for TSC - we are implicitly checking that deposit exists by making it to this point.
|
|
1048
|
+
if (!deposit) {
|
|
1049
|
+
throw new Error("Deposit should exist in relay hash dictionary.");
|
|
1050
|
+
}
|
|
1051
|
+
const destinationBlockRange = getBlockRangeForChain(blockRangesForChains, destinationChainId, chainIds);
|
|
1052
|
+
if (
|
|
1053
|
+
// If the slow fill request that was replaced by this fill was in an older bundle, then we don't
|
|
1054
|
+
// need to check if the slow fill request was valid since we can assume all bundles in the past
|
|
1055
|
+
// were validated. However, we might as well double check.
|
|
1056
|
+
this.clients.hubPoolClient.areTokensEquivalent(
|
|
1057
|
+
deposit.inputToken,
|
|
1058
|
+
deposit.originChainId,
|
|
1059
|
+
deposit.outputToken,
|
|
1060
|
+
deposit.destinationChainId,
|
|
1061
|
+
deposit.quoteBlockNumber
|
|
1062
|
+
) &&
|
|
1063
|
+
// If there is a slow fill request in this bundle that matches the relay hash, then there was no slow fill
|
|
1064
|
+
// created that would be considered excess.
|
|
1065
|
+
(!slowFillRequest || slowFillRequest.blockNumber < destinationBlockRange[0])
|
|
1066
|
+
) {
|
|
1067
|
+
validatedBundleUnexecutableSlowFills.push(deposit);
|
|
1068
|
+
}
|
|
1069
|
+
});
|
|
1070
|
+
}
|
|
1071
|
+
}
|
|
1072
|
+
this.logger.debug({
|
|
1073
|
+
at: "BundleDataClient#loadData",
|
|
1074
|
+
message: `Processed ${fillCounter} fills in ${performance.now() - start}ms.`,
|
|
1075
|
+
});
|
|
1076
|
+
start = performance.now();
|
|
1077
|
+
|
|
1078
|
+
// Go through expired deposits in this bundle and now prune those that we have seen a fill for to construct
|
|
1079
|
+
// the list of expired deposits we need to refund in this bundle.
|
|
1080
|
+
expiredBundleDepositHashes.forEach((relayDataHash) => {
|
|
1081
|
+
const { deposit, fill } = v3RelayHashes[relayDataHash];
|
|
1082
|
+
assert(isDefined(deposit), "Deposit should exist in relay hash dictionary.");
|
|
1083
|
+
if (
|
|
1084
|
+
!fill &&
|
|
1085
|
+
isDefined(deposit) // Needed for TSC - we check this above.
|
|
1086
|
+
) {
|
|
1087
|
+
updateExpiredDepositsV3(expiredDepositsToRefundV3, deposit);
|
|
1088
|
+
}
|
|
1089
|
+
});
|
|
1090
|
+
|
|
1091
|
+
// For all deposits older than this bundle, we need to check if they expired in this bundle and if they did,
|
|
1092
|
+
// whether there was a slow fill created for it in a previous bundle that is now unexecutable and replaced
|
|
1093
|
+
// by a new expired deposit refund.
|
|
1094
|
+
await forEachAsync(Array.from(olderDepositHashes), async (relayDataHash) => {
|
|
1095
|
+
const { deposit, slowFillRequest, fill } = v3RelayHashes[relayDataHash];
|
|
1096
|
+
assert(isDefined(deposit), "Deposit should exist in relay hash dictionary.");
|
|
1097
|
+
const { destinationChainId } = deposit!;
|
|
1098
|
+
const destinationBlockRange = getBlockRangeForChain(blockRangesForChains, destinationChainId, chainIds);
|
|
1099
|
+
|
|
1100
|
+
// Only look for deposits that were mined before this bundle and that are newly expired.
|
|
1101
|
+
// If the fill deadline is lower than the bundle start block on the destination chain, then
|
|
1102
|
+
// we should assume it was marked "newly expired" and refunded in a previous bundle.
|
|
1103
|
+
if (
|
|
1104
|
+
// If there is a valid fill that we saw matching this deposit, then it does not need a refund.
|
|
1105
|
+
!fill &&
|
|
1106
|
+
isDefined(deposit) && // Needed for TSC - we check this above.
|
|
1107
|
+
deposit.fillDeadline < bundleBlockTimestamps[destinationChainId][1] &&
|
|
1108
|
+
deposit.fillDeadline >= bundleBlockTimestamps[destinationChainId][0] &&
|
|
1109
|
+
spokePoolClients[destinationChainId] !== undefined
|
|
1110
|
+
) {
|
|
1111
|
+
// If we haven't seen a fill matching this deposit, then we need to rule out that it was filled a long time ago
|
|
1112
|
+
// by checking its on-chain fill status.
|
|
1113
|
+
const fillStatus = await relayFillStatus(
|
|
1114
|
+
spokePoolClients[destinationChainId].spokePool,
|
|
1115
|
+
deposit,
|
|
1116
|
+
// We can assume that in production
|
|
1117
|
+
// the block ranges passed into this function would never contain blocks where the spoke pool client
|
|
1118
|
+
// hasn't queried. This is because this function will usually be called
|
|
1119
|
+
// in production with block ranges that were validated by
|
|
1120
|
+
// DataworkerUtils.blockRangesAreInvalidForSpokeClients
|
|
1121
|
+
Math.min(destinationBlockRange[1], spokePoolClients[destinationChainId].latestBlockSearched),
|
|
1122
|
+
destinationChainId
|
|
1123
|
+
);
|
|
1124
|
+
|
|
1125
|
+
// If there is no matching fill and the deposit expired in this bundle and the fill status on-chain is not
|
|
1126
|
+
// Filled, then we can refund it as an expired deposit.
|
|
1127
|
+
if (fillStatus !== FillStatus.Filled) {
|
|
1128
|
+
updateExpiredDepositsV3(expiredDepositsToRefundV3, deposit);
|
|
1129
|
+
}
|
|
1130
|
+
// If fill status is RequestedSlowFill, then we might need to mark down an unexecutable
|
|
1131
|
+
// slow fill that we're going to replace with an expired deposit refund.
|
|
1132
|
+
// If deposit cannot be slow filled, then exit early.
|
|
1133
|
+
if (fillStatus !== FillStatus.RequestedSlowFill) {
|
|
1134
|
+
return;
|
|
1135
|
+
}
|
|
1136
|
+
// Now, check if there was a slow fill created for this deposit in a previous bundle which would now be
|
|
1137
|
+
// unexecutable. Mark this deposit as having created an unexecutable slow fill if there is no matching
|
|
1138
|
+
// slow fill request or the matching slow fill request took place in a previous bundle.
|
|
1139
|
+
|
|
1140
|
+
// If there is a slow fill request in this bundle, then the expired deposit refund will supercede
|
|
1141
|
+
// the slow fill request. If there is no slow fill request seen or its older than this bundle, then we can
|
|
1142
|
+
// assume a slow fill leaf was created for it because its tokens are equivalent. The slow fill request was
|
|
1143
|
+
// also sent before the fill deadline expired since we checked that above.
|
|
1144
|
+
if (
|
|
1145
|
+
// Since this deposit was requested for a slow fill in an older bundle at this point, we don't
|
|
1146
|
+
// technically need to check if the slow fill request was valid since we can assume all bundles in the past
|
|
1147
|
+
// were validated. However, we might as well double check.
|
|
1148
|
+
this.clients.hubPoolClient.areTokensEquivalent(
|
|
1149
|
+
deposit.inputToken,
|
|
1150
|
+
deposit.originChainId,
|
|
1151
|
+
deposit.outputToken,
|
|
1152
|
+
deposit.destinationChainId,
|
|
1153
|
+
deposit.quoteBlockNumber
|
|
1154
|
+
) &&
|
|
1155
|
+
(!slowFillRequest || slowFillRequest.blockNumber < destinationBlockRange[0])
|
|
1156
|
+
) {
|
|
1157
|
+
validatedBundleUnexecutableSlowFills.push(deposit);
|
|
1158
|
+
}
|
|
1159
|
+
}
|
|
1160
|
+
});
|
|
1161
|
+
|
|
1162
|
+
// Batch compute V3 lp fees.
|
|
1163
|
+
start = performance.now();
|
|
1164
|
+
const promises = [
|
|
1165
|
+
validatedBundleV3Fills.length > 0
|
|
1166
|
+
? this.clients.hubPoolClient.batchComputeRealizedLpFeePct(
|
|
1167
|
+
validatedBundleV3Fills.map((fill) => {
|
|
1168
|
+
const matchedDeposit = v3RelayHashes[this.getRelayHashFromEvent(fill)].deposit;
|
|
1169
|
+
assert(isDefined(matchedDeposit), "Deposit should exist in relay hash dictionary.");
|
|
1170
|
+
const { chainToSendRefundTo: paymentChainId } = getRefundInformationFromFill(
|
|
1171
|
+
fill,
|
|
1172
|
+
this.clients.hubPoolClient,
|
|
1173
|
+
blockRangesForChains,
|
|
1174
|
+
chainIds,
|
|
1175
|
+
matchedDeposit!.fromLiteChain
|
|
1176
|
+
);
|
|
1177
|
+
return {
|
|
1178
|
+
...fill,
|
|
1179
|
+
paymentChainId,
|
|
1180
|
+
};
|
|
1181
|
+
})
|
|
1182
|
+
)
|
|
1183
|
+
: [],
|
|
1184
|
+
validatedBundleSlowFills.length > 0
|
|
1185
|
+
? this.clients.hubPoolClient.batchComputeRealizedLpFeePct(
|
|
1186
|
+
validatedBundleSlowFills.map((deposit) => {
|
|
1187
|
+
return {
|
|
1188
|
+
...deposit,
|
|
1189
|
+
paymentChainId: deposit.destinationChainId,
|
|
1190
|
+
};
|
|
1191
|
+
})
|
|
1192
|
+
)
|
|
1193
|
+
: [],
|
|
1194
|
+
validatedBundleUnexecutableSlowFills.length > 0
|
|
1195
|
+
? this.clients.hubPoolClient.batchComputeRealizedLpFeePct(
|
|
1196
|
+
validatedBundleUnexecutableSlowFills.map((deposit) => {
|
|
1197
|
+
return {
|
|
1198
|
+
...deposit,
|
|
1199
|
+
paymentChainId: deposit.destinationChainId,
|
|
1200
|
+
};
|
|
1201
|
+
})
|
|
1202
|
+
)
|
|
1203
|
+
: [],
|
|
1204
|
+
];
|
|
1205
|
+
const [v3FillLpFees, v3SlowFillLpFees, v3UnexecutableSlowFillLpFees] = await Promise.all(promises);
|
|
1206
|
+
this.logger.debug({
|
|
1207
|
+
at: "BundleDataClient#loadData",
|
|
1208
|
+
message: `Computed batch async LP fees in ${performance.now() - start}ms.`,
|
|
1209
|
+
});
|
|
1210
|
+
v3FillLpFees.forEach(({ realizedLpFeePct }, idx) => {
|
|
1211
|
+
const fill = validatedBundleV3Fills[idx];
|
|
1212
|
+
const associatedDeposit = v3RelayHashes[this.getRelayHashFromEvent(fill)].deposit;
|
|
1213
|
+
assert(isDefined(associatedDeposit), "Deposit should exist in relay hash dictionary.");
|
|
1214
|
+
const { chainToSendRefundTo, repaymentToken } = getRefundInformationFromFill(
|
|
1215
|
+
fill,
|
|
1216
|
+
this.clients.hubPoolClient,
|
|
1217
|
+
blockRangesForChains,
|
|
1218
|
+
chainIds,
|
|
1219
|
+
associatedDeposit!.fromLiteChain
|
|
1220
|
+
);
|
|
1221
|
+
updateBundleFillsV3(bundleFillsV3, fill, realizedLpFeePct, chainToSendRefundTo, repaymentToken);
|
|
1222
|
+
});
|
|
1223
|
+
v3SlowFillLpFees.forEach(({ realizedLpFeePct: lpFeePct }, idx) => {
|
|
1224
|
+
const deposit = validatedBundleSlowFills[idx];
|
|
1225
|
+
updateBundleSlowFills(bundleSlowFillsV3, { ...deposit, lpFeePct });
|
|
1226
|
+
});
|
|
1227
|
+
v3UnexecutableSlowFillLpFees.forEach(({ realizedLpFeePct: lpFeePct }, idx) => {
|
|
1228
|
+
const deposit = validatedBundleUnexecutableSlowFills[idx];
|
|
1229
|
+
updateBundleExcessSlowFills(unexecutableSlowFills, { ...deposit, lpFeePct });
|
|
1230
|
+
});
|
|
1231
|
+
|
|
1232
|
+
const v3SpokeEventsReadable = prettyPrintV3SpokePoolEvents(
|
|
1233
|
+
bundleDepositsV3,
|
|
1234
|
+
bundleFillsV3,
|
|
1235
|
+
bundleInvalidFillsV3,
|
|
1236
|
+
bundleSlowFillsV3,
|
|
1237
|
+
expiredDepositsToRefundV3,
|
|
1238
|
+
unexecutableSlowFills
|
|
1239
|
+
);
|
|
1240
|
+
|
|
1241
|
+
if (bundleInvalidFillsV3.length > 0) {
|
|
1242
|
+
this.logger.debug({
|
|
1243
|
+
at: "BundleDataClient#loadData",
|
|
1244
|
+
message: "Finished loading V3 spoke pool data and found some invalid V3 fills in range",
|
|
1245
|
+
blockRangesForChains,
|
|
1246
|
+
bundleInvalidFillsV3,
|
|
1247
|
+
});
|
|
1248
|
+
}
|
|
1249
|
+
|
|
1250
|
+
this.logger.debug({
|
|
1251
|
+
at: "BundleDataClient#loadDataFromScratch",
|
|
1252
|
+
message: `Computed bundle data in ${Math.round(performance.now() - start) / 1000}s.`,
|
|
1253
|
+
blockRangesForChains: JSON.stringify(blockRangesForChains),
|
|
1254
|
+
v3SpokeEventsReadable,
|
|
1255
|
+
});
|
|
1256
|
+
return {
|
|
1257
|
+
bundleDepositsV3,
|
|
1258
|
+
expiredDepositsToRefundV3,
|
|
1259
|
+
bundleFillsV3,
|
|
1260
|
+
unexecutableSlowFills,
|
|
1261
|
+
bundleSlowFillsV3,
|
|
1262
|
+
};
|
|
1263
|
+
}
|
|
1264
|
+
|
|
1265
|
+
// Internal function to uniquely identify a bridge event. This is preferred over `SDK.getRelayDataHash` which returns
|
|
1266
|
+
// keccak256 hash of the relay data, which can be used as input into the on-chain `fillStatuses()` function in the
|
|
1267
|
+
// spoke pool contract. However, this internal function is used to uniquely identify a bridging event
|
|
1268
|
+
// for speed since it's easier to build a string from the event data than to hash it.
|
|
1269
|
+
private getRelayHashFromEvent(event: V3DepositWithBlock | V3FillWithBlock | SlowFillRequestWithBlock): string {
  // Build a cheap unique key for a bridge event by joining the relay-identifying fields.
  // The field order is a fixed contract: the same key is computed for deposits, fills and
  // slow fill requests and compared across them, so it must never change.
  const relayKeyFields = [
    event.depositor,
    event.recipient,
    event.exclusiveRelayer,
    event.inputToken,
    event.outputToken,
    event.inputAmount,
    event.outputAmount,
    event.originChainId,
    event.depositId,
    event.fillDeadline,
    event.exclusivityDeadline,
    event.message,
    event.destinationChainId,
  ];
  // `${field}` stringifies each value exactly like the equivalent template literal would.
  return relayKeyFields.map((field) => `${field}`).join("-");
}
|
|
1272
|
+
|
|
1273
|
+
async getBundleBlockTimestamps(
  chainIds: number[],
  blockRangesForChains: number[][],
  spokePoolClients: SpokePoolClientsByChain
): Promise<{ [chainId: string]: number[] }> {
  // For each enabled chain with an available spoke pool client, resolve the bundle block
  // range endpoints to on-chain block timestamps: { chainId: [startTime, endTime] }.
  // Chains that are disabled, missing a block range, or missing a spoke pool client are
  // omitted from the returned object.
  return Object.fromEntries(
    (
      await mapAsync(chainIds, async (chainId, index) => {
        const blockRangeForChain = blockRangesForChains[index];
        if (!isDefined(blockRangeForChain) || isChainDisabled(blockRangeForChain)) {
          return;
        }
        const [_startBlockForChain, _endBlockForChain] = blockRangeForChain;
        const spokePoolClient = spokePoolClients[chainId];

        // Relayer instances using the BundleDataClient for repayment estimates may only relay on a subset of chains.
        if (!isDefined(spokePoolClient)) {
          return;
        }

        // We can assume that in production the block ranges passed into this function would never
        // contain blocks where the spoke pool client hasn't queried. This is because this function
        // will usually be called in production with block ranges that were validated by
        // DataworkerUtils.blockRangesAreInvalidForSpokeClients.
        const startBlockForChain = Math.min(_startBlockForChain, spokePoolClient.latestBlockSearched);
        const endBlockForChain = Math.min(_endBlockForChain, spokePoolClient.latestBlockSearched);
        // The two getBlock lookups are independent RPC calls, so issue them concurrently
        // instead of awaiting them one after the other.
        const [startBlock, endBlock] = await Promise.all([
          spokePoolClient.spokePool.provider.getBlock(startBlockForChain),
          spokePoolClient.spokePool.provider.getBlock(endBlockForChain),
        ]);
        const [startTime, endTime] = [Number(startBlock.timestamp), Number(endBlock.timestamp)];
        // Sanity checks:
        assert(endTime >= startTime, "End time should be greater than start time.");
        assert(startTime > 0, "Start time should be greater than 0.");
        return [chainId, [startTime, endTime]];
      })
    ).filter(isDefined)
  );
}
|
|
1311
|
+
}
|