ddx-python 1.0.4__cp310-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ddx/.gitignore +1 -0
- ddx/__init__.py +58 -0
- ddx/_rust/__init__.pyi +2685 -0
- ddx/_rust/common/__init__.pyi +17 -0
- ddx/_rust/common/accounting.pyi +6 -0
- ddx/_rust/common/enums.pyi +3 -0
- ddx/_rust/common/requests/__init__.pyi +23 -0
- ddx/_rust/common/requests/intents.pyi +19 -0
- ddx/_rust/common/specs.pyi +17 -0
- ddx/_rust/common/state/__init__.pyi +41 -0
- ddx/_rust/common/state/keys.pyi +29 -0
- ddx/_rust/common/transactions.pyi +7 -0
- ddx/_rust/decimal.pyi +3 -0
- ddx/_rust/h256.pyi +3 -0
- ddx/_rust.abi3.so +0 -0
- ddx/app_config/ethereum/addresses.json +526 -0
- ddx/auditor/README.md +32 -0
- ddx/auditor/__init__.py +0 -0
- ddx/auditor/auditor_driver.py +1043 -0
- ddx/auditor/websocket_message.py +54 -0
- ddx/common/__init__.py +0 -0
- ddx/common/epoch_params.py +28 -0
- ddx/common/fill_context.py +141 -0
- ddx/common/logging.py +184 -0
- ddx/common/market_aware_account.py +259 -0
- ddx/common/market_specs.py +64 -0
- ddx/common/trade_mining_params.py +19 -0
- ddx/common/transaction_utils.py +85 -0
- ddx/common/transactions/__init__.py +0 -0
- ddx/common/transactions/advance_epoch.py +91 -0
- ddx/common/transactions/advance_settlement_epoch.py +63 -0
- ddx/common/transactions/all_price_checkpoints.py +84 -0
- ddx/common/transactions/cancel.py +76 -0
- ddx/common/transactions/cancel_all.py +88 -0
- ddx/common/transactions/complete_fill.py +103 -0
- ddx/common/transactions/disaster_recovery.py +96 -0
- ddx/common/transactions/event.py +48 -0
- ddx/common/transactions/fee_distribution.py +119 -0
- ddx/common/transactions/funding.py +292 -0
- ddx/common/transactions/futures_expiry.py +123 -0
- ddx/common/transactions/genesis.py +108 -0
- ddx/common/transactions/inner/__init__.py +0 -0
- ddx/common/transactions/inner/adl_outcome.py +25 -0
- ddx/common/transactions/inner/fill.py +232 -0
- ddx/common/transactions/inner/liquidated_position.py +41 -0
- ddx/common/transactions/inner/liquidation_entry.py +41 -0
- ddx/common/transactions/inner/liquidation_fill.py +118 -0
- ddx/common/transactions/inner/outcome.py +32 -0
- ddx/common/transactions/inner/trade_fill.py +292 -0
- ddx/common/transactions/insurance_fund_update.py +138 -0
- ddx/common/transactions/insurance_fund_withdraw.py +100 -0
- ddx/common/transactions/liquidation.py +353 -0
- ddx/common/transactions/partial_fill.py +125 -0
- ddx/common/transactions/pnl_realization.py +120 -0
- ddx/common/transactions/post.py +72 -0
- ddx/common/transactions/post_order.py +95 -0
- ddx/common/transactions/price_checkpoint.py +97 -0
- ddx/common/transactions/signer_registered.py +62 -0
- ddx/common/transactions/specs_update.py +61 -0
- ddx/common/transactions/strategy_update.py +158 -0
- ddx/common/transactions/tradable_product_update.py +98 -0
- ddx/common/transactions/trade_mining.py +147 -0
- ddx/common/transactions/trader_update.py +131 -0
- ddx/common/transactions/withdraw.py +90 -0
- ddx/common/transactions/withdraw_ddx.py +74 -0
- ddx/common/utils.py +176 -0
- ddx/config.py +17 -0
- ddx/derivadex_client.py +270 -0
- ddx/models/__init__.py +0 -0
- ddx/models/base.py +132 -0
- ddx/py.typed +0 -0
- ddx/realtime_client/__init__.py +2 -0
- ddx/realtime_client/config.py +2 -0
- ddx/realtime_client/models/__init__.py +611 -0
- ddx/realtime_client/realtime_client.py +646 -0
- ddx/rest_client/__init__.py +0 -0
- ddx/rest_client/clients/__init__.py +0 -0
- ddx/rest_client/clients/base_client.py +60 -0
- ddx/rest_client/clients/market_client.py +1243 -0
- ddx/rest_client/clients/on_chain_client.py +439 -0
- ddx/rest_client/clients/signed_client.py +292 -0
- ddx/rest_client/clients/system_client.py +843 -0
- ddx/rest_client/clients/trade_client.py +357 -0
- ddx/rest_client/constants/__init__.py +0 -0
- ddx/rest_client/constants/endpoints.py +66 -0
- ddx/rest_client/contracts/__init__.py +0 -0
- ddx/rest_client/contracts/checkpoint/__init__.py +560 -0
- ddx/rest_client/contracts/ddx/__init__.py +1949 -0
- ddx/rest_client/contracts/dummy_token/__init__.py +1014 -0
- ddx/rest_client/contracts/i_collateral/__init__.py +1414 -0
- ddx/rest_client/contracts/i_stake/__init__.py +696 -0
- ddx/rest_client/exceptions/__init__.py +0 -0
- ddx/rest_client/exceptions/exceptions.py +32 -0
- ddx/rest_client/http/__init__.py +0 -0
- ddx/rest_client/http/http_client.py +336 -0
- ddx/rest_client/models/__init__.py +0 -0
- ddx/rest_client/models/market.py +693 -0
- ddx/rest_client/models/signed.py +61 -0
- ddx/rest_client/models/system.py +311 -0
- ddx/rest_client/models/trade.py +185 -0
- ddx/rest_client/utils/__init__.py +0 -0
- ddx/rest_client/utils/encryption_utils.py +26 -0
- ddx/utils/__init__.py +0 -0
- ddx_python-1.0.4.dist-info/METADATA +63 -0
- ddx_python-1.0.4.dist-info/RECORD +106 -0
- ddx_python-1.0.4.dist-info/WHEEL +5 -0
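
The 1,043-line hunk below appears to correspond to ddx/auditor/auditor_driver.py. As a rough orientation before reading it, the sketch that follows shows how the AuditorDriver defined there might be constructed and run. It is illustrative only and not part of the wheel: the constructor keyword arguments for EpochParams and TradeMiningParams are guesses inferred from the attributes the driver reads (epoch_size, funding_period, trade_mining_period, trade_mining_length, and the trade-mining reward fields), and the URL, tranche, and reward values are placeholders.

import asyncio

from ddx._rust.decimal import Decimal
from ddx.auditor.auditor_driver import AuditorDriver
from ddx.common.epoch_params import EpochParams
from ddx.common.trade_mining_params import TradeMiningParams


async def run_auditor() -> None:
    # All literal values below are placeholders; the EpochParams and
    # TradeMiningParams constructor signatures are assumptions, not taken
    # from this diff.
    driver = AuditorDriver(
        webserver_url="https://operator.example.com",
        genesis_params={},  # environment-specific genesis params
        epoch_params=EpochParams(
            epoch_size=100, funding_period=10, trade_mining_period=10
        ),
        trade_mining_params=TradeMiningParams(
            trade_mining_length=100,
            trade_mining_reward_per_epoch=Decimal("100"),
            trade_mining_maker_reward_percentage=Decimal("0.2"),
            trade_mining_taker_reward_percentage=Decimal("0.8"),
        ),
        collateral_tranches=[(Decimal("0"), Decimal("1000000"))],
        contract_deployment="geth",
    )
    # main() resets local state and runs the API <> Auditor WebSocket loop
    await driver.main()


if __name__ == "__main__":
    asyncio.run(run_auditor())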
@@ -0,0 +1,1043 @@
"""
AuditorDriver module
"""

import asyncio
import datetime
from collections import defaultdict
from collections.abc import AsyncIterable
from typing import Optional, Type, TypeVar
import os
import requests
import simplejson as json
import websockets
from websockets import WebSocketClientProtocol
from web3.auto import w3

from ddx._rust.common import ProductSymbol
from ddx._rust.common.state import DerivadexSMT, Item, ItemKind, Price
from ddx._rust.common.state.keys import (
    BookOrderKey,
    InsuranceFundKey,
    PositionKey,
    PriceKey,
    StrategyKey,
    TraderKey,
)
from ddx._rust.decimal import Decimal
from ddx._rust.h256 import H256

from ddx.common.epoch_params import EpochParams
from ddx.common.logging import CHECKMARK, auditor_logger
from ddx.common.trade_mining_params import TradeMiningParams
from ddx.common.transactions.advance_epoch import AdvanceEpoch
from ddx.common.transactions.advance_settlement_epoch import AdvanceSettlementEpoch
from ddx.common.transactions.all_price_checkpoints import AllPriceCheckpoints
from ddx.common.transactions.cancel import Cancel
from ddx.common.transactions.cancel_all import CancelAll
from ddx.common.transactions.complete_fill import CompleteFill
from ddx.common.transactions.disaster_recovery import DisasterRecovery
from ddx.common.transactions.event import Event
from ddx.common.transactions.fee_distribution import FeeDistribution
from ddx.common.transactions.funding import Funding
from ddx.common.transactions.futures_expiry import FuturesExpiry
from ddx.common.transactions.genesis import Genesis
from ddx.common.transactions.insurance_fund_update import InsuranceFundUpdate
from ddx.common.transactions.insurance_fund_withdraw import InsuranceFundWithdraw
from ddx.common.transactions.liquidation import Liquidation
from ddx.common.transactions.partial_fill import PartialFill
from ddx.common.transactions.pnl_realization import PnlRealization
from ddx.common.transactions.post_order import PostOrder
from ddx.common.transactions.signer_registered import SignerRegistered
from ddx.common.transactions.specs_update import SpecsUpdate
from ddx.common.transactions.strategy_update import StrategyUpdate
from ddx.common.transactions.tradable_product_update import TradableProductUpdate
from ddx.common.transactions.trade_mining import TradeMining
from ddx.common.transactions.trader_update import TraderUpdate
from ddx.common.transactions.withdraw import Withdraw
from ddx.common.transactions.withdraw_ddx import WithdrawDDX
from ddx.common.utils import get_parsed_tx_log_entry, ComplexOutputEncoder
from ddx.auditor.websocket_message import WebsocketEventType, WebsocketMessage


logger = auditor_logger(__name__)

EventT = TypeVar("EventT", bound=Event)
RAW_TYPE_TO_EVENT_TYPE: dict[str, Type[EventT]] = {
    "Post": PostOrder,
    "CompleteFill": CompleteFill,
    "PartialFill": PartialFill,
    "Liquidation": Liquidation,
    "Cancel": Cancel,
    "CancelAll": CancelAll,
    "StrategyUpdate": StrategyUpdate,
    "TraderUpdate": TraderUpdate,
    "PriceCheckpoint": AllPriceCheckpoints,
    "PnlRealization": PnlRealization,
    "Funding": Funding,
    "FuturesExpiry": FuturesExpiry,
    "TradeMining": TradeMining,
    "Withdraw": Withdraw,
    "WithdrawDDX": WithdrawDDX,
    "InsuranceFundWithdraw": InsuranceFundWithdraw,
    "Genesis": Genesis,
    "AdvanceEpoch": AdvanceEpoch,
    "AdvanceSettlementEpoch": AdvanceSettlementEpoch,
    "InsuranceFundUpdate": InsuranceFundUpdate,
    "DisasterRecovery": DisasterRecovery,
    "FeeDistribution": FeeDistribution,
    "SignerRegistered": SignerRegistered,
    "SpecsUpdate": SpecsUpdate,
    "TradableProductUpdate": TradableProductUpdate,
}


def empty_queue(q: asyncio.Queue):
    for _ in range(q.qsize()):
        # Depending on your program, you may want to
        # catch QueueEmpty
        q.get_nowait()
        q.task_done()


class AuditorDriver:
    """
    Defines an AuditorDriver.
    """

    def __init__(
        self,
        webserver_url: str,
        genesis_params: dict,
        epoch_params: EpochParams,
        trade_mining_params: TradeMiningParams,
        collateral_tranches: list[tuple[Decimal, Decimal]],
        contract_deployment: str,
    ):
        """
        Initialize an AuditorDriver. An Auditor allows any third party to
        process a state snapshot of the DerivaDEX Sparse Merkle Tree (SMT)
        and transaction log entries to validate the integrity of the
        exchange. The driver essentially maintains its own SMT and can
        transition its state upon receiving transaction log entries. The
        root hashes must match.

        Parameters
        ----------
        webserver_url: str
            Operator hostname
        genesis_params : dict
            Genesis params for the environment
        epoch_params: EpochParams
            Epoch parameters
        trade_mining_params: TradeMiningParams
            Trade mining parameters
        collateral_tranches: list[tuple[Decimal, Decimal]]
            Collateral guards tranches
        contract_deployment: str
            Contract deployment name, e.g. geth
        """

        self.webserver_url = webserver_url
        self.contract_deployment = contract_deployment
        self.epoch_params = epoch_params
        self.trade_mining_params = trade_mining_params
        self.collateral_tranches = collateral_tranches
        self.genesis_params = genesis_params

    def _reset(self):
        # Initialize an empty SMT
        self.smt = DerivadexSMT()

        # Initialize latest price leaves. These
        # technically are abstractions above the SMT for easier/faster
        # access for a trader client
        self.latest_price_leaves: dict[ProductSymbol, tuple[PriceKey, Price]] = {}

        # Initialize a data construct for pending transaction log
        # entries. We maintain a backlog of pending transaction log entries
        # in this scenario so that although we may receive transaction log
        # entries out of order, we will always apply them to the SMT in order.
        self.pending_tx_log_entries = defaultdict(dict)

        # Current root hash derived locally. For every transaction event
        # emitted by the transaction log, the state root hash prior to
        # the transaction being applied. As such, we maintain the
        # current root hash to compare against the next inbound
        # transaction log's root hash.
        self.current_state_root_hash = (
            "0x0000000000000000000000000000000000000000000000000000000000000000"
        )
        self.current_batch_state_root_hash = (
            "0x0000000000000000000000000000000000000000000000000000000000000000"
        )

        self.first_head = True
        self._snapshot_received = False

        # More Pythonic ask-for-forgiveness approaches for the queues
        # and event below

        # set up an asyncio queue for messages that will be added by
        # the Auditor and popped to send to the API
        try:
            empty_queue(self.api_auditor_queue)
        except AttributeError:
            self.api_auditor_queue = asyncio.Queue()

        self.expected_epoch_id = 0
        self.expected_tx_ordinal = 0
        self.latest_batch_id = 0

        self.is_trade_mining = (
            lambda epoch_id: epoch_id * self.epoch_params.epoch_size
            < self.trade_mining_params.trade_mining_length
            * self.epoch_params.trade_mining_period
            + 1
        )

    @property
    def smt(self):
        return self._smt

    @smt.setter
    def smt(self, smt):
        self._smt = smt

    @property
    def expected_epoch_id(self):
        return self._expected_epoch_id

    @expected_epoch_id.setter
    def expected_epoch_id(self, epoch_id):
        self._expected_epoch_id = epoch_id

    @property
    def expected_tx_ordinal(self):
        return self._expected_tx_ordinal

    @expected_tx_ordinal.setter
    def expected_tx_ordinal(self, tx_ordinal):
        self._expected_tx_ordinal = tx_ordinal

    def process_tx(self, tx: Event, tx_log_event: dict):
        """
        Process an individual transaction. This transaction will be
        appropriately decoded into the correct Transaction type and
        handled differently to adjust the SMT.

        Parameters
        ----------
        tx : EventT
            A transaction
        tx_log_event : dict
            The raw transaction log event being processed
        """

        if isinstance(tx, PostOrder):
            # PostOrder transaction
            tx.process_tx(
                self.smt,
                latest_price_leaves=self.latest_price_leaves,
            )
        elif isinstance(tx, CompleteFill):
            # CompleteFill transaction
            tx.process_tx(
                self.smt,
                latest_price_leaves=self.latest_price_leaves,
                trade_mining_active=self.is_trade_mining(tx_log_event["epochId"]),
                epoch_id=tx_log_event["epochId"],
            )
        elif isinstance(tx, PartialFill):
            # PartialFill transaction
            tx.process_tx(
                self.smt,
                latest_price_leaves=self.latest_price_leaves,
                trade_mining_active=self.is_trade_mining(tx_log_event["epochId"]),
                epoch_id=tx_log_event["epochId"],
            )
        elif isinstance(tx, Liquidation):
            # Liquidation transaction
            tx.process_tx(
                self.smt,
                latest_price_leaves=self.latest_price_leaves,
                trade_mining_active=self.is_trade_mining(tx_log_event["epochId"]),
                epoch_id=tx_log_event["epochId"],
            )
        elif isinstance(tx, Cancel):
            # Cancel transaction
            tx.process_tx(
                self.smt,
            )
        elif isinstance(tx, CancelAll):
            # CancelAll transaction
            tx.process_tx(
                self.smt,
            )
        elif isinstance(tx, StrategyUpdate):
            # StrategyUpdate transaction
            tx.process_tx(
                self.smt,
                collateral_tranches=self.collateral_tranches,
            )
        elif isinstance(tx, TraderUpdate):
            # TraderUpdate transaction
            tx.process_tx(
                self.smt,
            )
        elif isinstance(tx, AllPriceCheckpoints):
            # AllPriceCheckpoints transaction
            tx.process_tx(
                self.smt,
                latest_price_leaves=self.latest_price_leaves,
            )
        elif isinstance(tx, PnlRealization):
            # PnlRealization transaction
            tx.process_tx(
                self.smt,
                latest_price_leaves=self.latest_price_leaves,
            )
        elif isinstance(tx, Funding):
            # Funding transaction
            tx.process_tx(
                self.smt,
                latest_price_leaves=self.latest_price_leaves,
                funding_period=self.epoch_params.funding_period,
            )
        elif isinstance(tx, FuturesExpiry):
            # FuturesExpiry transaction
            tx.process_tx(
                self.smt,
                latest_price_leaves=self.latest_price_leaves,
            )
        elif isinstance(tx, TradeMining):
            # TradeMining transaction
            tx.process_tx(
                self.smt,
                trade_mining_active=self.is_trade_mining(tx_log_event["epochId"]),
                trade_mining_reward_per_epoch=self.trade_mining_params.trade_mining_reward_per_epoch,
                trade_mining_maker_reward_percentage=self.trade_mining_params.trade_mining_maker_reward_percentage,
                trade_mining_taker_reward_percentage=self.trade_mining_params.trade_mining_taker_reward_percentage,
            )
        elif isinstance(tx, Withdraw):
            # Withdraw transaction
            tx.process_tx(
                self.smt,
            )
        elif isinstance(tx, WithdrawDDX):
            # WithdrawDDX transaction
            tx.process_tx(
                self.smt,
            )
        elif isinstance(tx, InsuranceFundWithdraw):
            # InsuranceFundWithdraw transaction
            tx.process_tx(
                self.smt,
            )
        elif isinstance(tx, Genesis):
            # Genesis transaction
            tx.process_tx(
                auditor_instance=self,
                expected_epoch_id=AuditorDriver.expected_epoch_id.fset,
                expected_tx_ordinal=AuditorDriver.expected_tx_ordinal.fset,
                smt=AuditorDriver.smt.fset,
                genesis_params=self.genesis_params,
                current_time=datetime.datetime.fromtimestamp(
                    tx_log_event["timestamp"] / 1000, tz=datetime.timezone.utc
                ),
            )
        elif isinstance(tx, AdvanceEpoch):
            # AdvanceEpoch transaction
            tx.process_tx(
                self.smt,
                auditor_instance=self,
                expected_epoch_id=AuditorDriver.expected_epoch_id.fset,
                expected_tx_ordinal=AuditorDriver.expected_tx_ordinal.fset,
            )
        elif isinstance(tx, AdvanceSettlementEpoch):
            # AdvanceSettlementEpoch transaction
            tx.process_tx(
                self.smt,
                latest_price_leaves=self.latest_price_leaves,
            )
        elif isinstance(tx, InsuranceFundUpdate):
            # InsuranceFundUpdate transaction
            tx.process_tx(
                self.smt,
            )
        elif isinstance(tx, DisasterRecovery):
            # DisasterRecovery transaction
            tx.process_tx(
                self.smt,
                latest_price_leaves=self.latest_price_leaves,
            )
        elif isinstance(tx, FeeDistribution):
            # FeeDistribution transaction
            tx.process_tx(
                self.smt,
            )
        elif isinstance(tx, SignerRegistered):
            # SignerRegistered transaction
            tx.process_tx(
                self.smt,
            )
        elif isinstance(tx, SpecsUpdate):
            # SpecsUpdate transaction
            tx.process_tx(
                self.smt,
            )
        elif isinstance(tx, TradableProductUpdate):
            # TradableProductUpdate transaction
            tx.process_tx(
                self.smt,
            )
        else:
            raise RuntimeError(f"Unhandled SMT transaction type: {type(tx)}")

    def process_tx_log_event(
        self, tx_log_event: dict, suppress_trader_queue: bool
    ) -> list:
        """
        Process an individual transaction log entry. Each entry will be
        appropriately decoded into the correct Transaction type and
        handled differently to adjust the SMT.

        Parameters
        ----------
        tx_log_event : dict
            A transaction log event
        suppress_trader_queue : bool
            Suppress trader queue messages

        Returns
        -------
        list
            The transactions applied to the SMT, in order
        """

        processed_txs = []
        # Add the transaction log event to the pending transaction log
        # entries to be processed either now or later
        self.pending_tx_log_entries[tx_log_event["epochId"]][
            tx_log_event["txOrdinal"]
        ] = tx_log_event

        # Loop through all the pending transaction log entries that
        # should be processed now given the expected epoch ID and
        # transaction ordinal
        while (
            self.expected_epoch_id in self.pending_tx_log_entries
            and self.expected_tx_ordinal
            in self.pending_tx_log_entries[self.expected_epoch_id]
        ):
            # Retrieve the transaction log entry event that should be
            # processed now
            tx_log_event = self.pending_tx_log_entries[self.expected_epoch_id].pop(
                self.expected_tx_ordinal
            )

            def decode_tx():
                event_type = tx_log_event["event"]["t"]
                if event_type == "EpochMarker":
                    event_type = tx_log_event["event"]["c"]["kind"]
                return event_type, RAW_TYPE_TO_EVENT_TYPE[
                    event_type
                ].decode_value_into_cls(tx_log_event)

            if (
                tx_log_event["batchId"] == self.latest_batch_id
                and tx_log_event["stateRootHash"] != self.current_batch_state_root_hash
            ):
                raise RuntimeError(
                    f"Tx log root hash ({tx_log_event['stateRootHash']}) != current batch root hash ({self.current_batch_state_root_hash})"
                )
            elif (
                tx_log_event["batchId"] != self.latest_batch_id
                and tx_log_event["stateRootHash"] != self.current_state_root_hash
            ):
                logger.info("state root mismatch detected - shutting down operators")
                if self.contract_deployment == "geth":
                    self.shutdown_operators()

                logger.error(
                    f"smt leaves before request {tx_log_event['requestIndex']} (result of request {tx_log_event['requestIndex'] - 1}, dump THIS request from the operator): {[(str(key), value.abi_encoded_value().hex()) for key, value in self.smt.all_leaves()]}\n\nHuman readable:\n{str(self.smt.all_leaves())}"
                )
                try:
                    req_idx = tx_log_event.get("requestIndex", "unknown")
                    dump_path = f"/tmp/auditor_mismatch_request_{req_idx}.json"
                    dump_payload = {
                        "requestIndex": req_idx,
                        "currentStateRootHash": self.current_state_root_hash,
                        "txLogStateRootHash": tx_log_event.get("stateRootHash"),
                        "txLogEvent": tx_log_event,
                        "auditorLeaves": {
                            str(key): "0x" + value.abi_encoded_value().hex()
                            for key, value in self.smt.all_leaves()
                        },
                    }
                    with open(dump_path, "w") as f:
                        json.dump(dump_payload, f, cls=ComplexOutputEncoder)
                    logger.error("wrote auditor mismatch dump to %s", dump_path)
                except Exception as e:
                    logger.error("failed to write auditor mismatch dump: %s", e)

                raise RuntimeError(
                    f"Tx log root hash ({tx_log_event['stateRootHash']}) != current root hash ({self.current_state_root_hash})"
                )

            # Extract the transaction type from the event (e.g. Post,
            # CompleteFill, etc.)
            tx_type, tx = decode_tx()

            logger.success(
                f"{CHECKMARK} - processing {tx_type}; tx log root hash ({tx_log_event['stateRootHash']}) == current root hash ({self.current_state_root_hash}); tx ({tx_log_event})"
            )

            self.process_tx(tx, tx_log_event)

            # set the current state root hash locally
            self.current_state_root_hash = f"0x{self.smt.root().as_bytes().hex()}"

            if self.latest_batch_id != tx_log_event["batchId"]:
                self.current_batch_state_root_hash = tx_log_event["stateRootHash"]
                self.latest_batch_id = tx_log_event["batchId"]

            # Increment the expected transaction ordinal by 1 (will be
            # reset back to 0 only when the epoch advances)
            self.expected_tx_ordinal += 1
            logger.success(
                f"{CHECKMARK * 2} - processed {tx_type}; arrived at new state root hash ({self.current_state_root_hash})"
            )

            processed_txs.append(tx)

        return processed_txs

    def process_state_snapshot(
        self, expected_epoch_id: int, state_snapshot: dict
    ) -> None:
        """
        Process a state snapshot and initialize the SMT accordingly.

        Parameters
        ----------
        expected_epoch_id : int
            Expected epoch ID for incoming transactions after the
            state snapshot
        state_snapshot : dict
            The state snapshot structured as a dictionary with the
            format: {<hash(leaf_key, leaf_value)>: (leaf_key, leaf_value)}
        """

        # Loop through state snapshot dictionary items
        for state_snapshot_key, state_snapshot_value in state_snapshot.items():
            # Compute the first and second words since with these two
            # blocks of data, we can determine what type of leaf we are
            # dealing with

            state_snapshot_key = bytes.fromhex(state_snapshot_key[2:])
            # Peel the item discriminant off (the first byte of the
            # leaf key) to determine what kind of leaf it is
            item_discriminant = ItemKind(w3.to_int(state_snapshot_key[:1]))

            item = Item.abi_decode_value_into_item(
                item_discriminant, bytes.fromhex(state_snapshot_value[2:])
            )

            state_snapshot_key_h256 = H256.from_bytes(state_snapshot_key)
            self.smt.store_item_by_key(state_snapshot_key_h256, item)

            if item_discriminant == ItemKind.Price:
                # Update latest price leaves abstraction with the new
                # price checkpoint data
                price_key = PriceKey.decode_key(state_snapshot_key_h256)
                price_item = Price.from_item(item)
                # Derive the Price encoded key and H256
                # repr
                if (
                    price_key.symbol not in self.latest_price_leaves
                    or price_item.ordinal
                    > self.latest_price_leaves[price_key.symbol][1].ordinal
                ):
                    self.latest_price_leaves[price_key.symbol] = (
                        price_key,
                        price_item,
                    )

        self.expected_epoch_id = expected_epoch_id
        self.expected_tx_ordinal = 0

    # ************** DATA GETTERS ************** #

    def get_trader_snapshot(self, trader_address: Optional[str]) -> list[dict]:
        """
        Get a snapshot of Trader leaves given a particular key.

        Parameters
        ----------
        trader_address : str
            Trader address
        """

        def topic_string(trader_key: TraderKey):
            return f"{'/'.join(filter(None, ['STATE', 'TRADER', trader_key]))}/"

        def encompasses_key(against_key: TraderKey):
            if trader_address is not None:
                return against_key.trader_address == trader_address
            return True

        if all(map(lambda x: x is not None, [trader_address])):
            # If the topic is maximally set, we have a specific leaf
            # we are querying, and can retrieve it from the SMT
            # accordingly

            # Return a snapshot with a single Trader leaf item
            trader_key: TraderKey = TraderKey(trader_address)
            return [{"t": topic_string(trader_key), "c": self.smt.trader(trader_key)}]

        # Return a snapshot containing the Trader leaves obtained
        return [
            {
                "t": topic_string(trader_key),
                "c": trader,
            }
            for trader_key, trader in self.smt.all_traders()
            if encompasses_key(trader_key)
        ]

    def get_strategy_snapshot(
        self, trader_address: Optional[str], strategy_id_hash: Optional[str]
    ) -> list[dict]:
        """
        Get a snapshot of Strategy leaves given a particular key.

        Parameters
        ----------
        trader_address : str
            Trader address
        strategy_id_hash : str
            Strategy ID hash
        """

        def topic_string(strategy_key: StrategyKey):
            return f"{'/'.join(filter(None, ['STATE', 'STRATEGY', strategy_key.trader_address, strategy_key.strategy_id_hash]))}/"

        def encompasses_key(against_key: StrategyKey):
            if strategy_id_hash is not None:
                return (
                    against_key.trader_address == trader_address
                    and against_key.strategy_id_hash == strategy_id_hash
                )
            elif trader_address is not None:
                return against_key.trader_address == trader_address
            return True

        if all(map(lambda x: x is not None, [trader_address, strategy_id_hash])):
            # If the topic is maximally set, we have a specific leaf
            # we are querying, and can retrieve it from the SMT
            # accordingly
            strategy_key: StrategyKey = StrategyKey(trader_address, strategy_id_hash)
            # Return a snapshot with a single Strategy leaf item
            return [
                {"t": topic_string(strategy_key), "c": self.smt.strategy(strategy_key)}
            ]

        # Return a snapshot containing the Strategy leaves obtained
        return [
            {
                "t": topic_string(strategy_key),
                "c": strategy,
            }
            for strategy_key, strategy in self.smt.all_strategies()
            if encompasses_key(strategy_key)
        ]

    def get_position_snapshot(
        self,
        symbol: Optional[ProductSymbol],
        trader_address: Optional[str],
        strategy_id_hash: Optional[str],
    ) -> list[dict]:
        """
        Get a snapshot of Position leaves given a particular key.

        Parameters
        ----------
        symbol : ProductSymbol
            Product symbol
        trader_address : str
            Trader address
        strategy_id_hash : str
            Strategy ID hash
        """

        def topic_string(position_key: PositionKey):
            return f"{'/'.join(filter(None, ['STATE', 'POSITION', position_key.symbol, position_key.trader_address, position_key.strategy_id_hash]))}/"

        def encompasses_key(against_key: PositionKey):
            if strategy_id_hash is not None:
                return (
                    against_key.symbol == symbol
                    and against_key.trader_address == trader_address
                    and against_key.strategy_id_hash == strategy_id_hash
                )
            elif trader_address is not None:
                return (
                    against_key.symbol == symbol
                    and against_key.trader_address == trader_address
                )
            elif symbol is not None:
                return against_key.symbol == symbol
            return True

        if all(
            map(lambda x: x is not None, [symbol, trader_address, strategy_id_hash])
        ):
            # If the topic is maximally set, we have a specific leaf
            # we are querying, and can retrieve it from the SMT
            # accordingly
            position_key: PositionKey = PositionKey(
                trader_address, strategy_id_hash, symbol
            )
            # Return a snapshot with a single Position leaf item
            return [
                {"t": topic_string(position_key), "c": self.smt.position(position_key)}
            ]

        # Return a snapshot containing the Position leaves obtained
        return [
            {
                "t": topic_string(position_key),
                "c": position,
            }
            for position_key, position in self.smt.all_positions()
            if encompasses_key(position_key)
        ]

    def get_book_order_snapshot(
        self,
        symbol: Optional[ProductSymbol],
        order_hash: Optional[str],
        trader_address: Optional[str],
        strategy_id_hash: Optional[str],
    ) -> list[dict]:
        """
        Get a snapshot of BookOrder leaves given a particular key.

        Parameters
        ----------
        symbol : ProductSymbol
            Product symbol
        order_hash : str
            Order hash
        trader_address : str
            Trader address
        strategy_id_hash : str
            Strategy ID hash
        """

        def topic_string(
            book_order_key: BookOrderKey, trader_address: str, strategy_id_hash: str
        ):
            return f"{'/'.join(filter(None, ['STATE', 'BOOK_ORDER', book_order_key.symbol, book_order_key.order_hash, trader_address, strategy_id_hash]))}/"

        def encompasses_key(
            against_key: BookOrderKey,
            against_trader_address: str,
            against_strategy_id_hash: str,
        ):
            if strategy_id_hash is not None:
                return (
                    against_key.symbol == symbol
                    and against_trader_address == trader_address
                    and against_strategy_id_hash == strategy_id_hash
                )
            elif trader_address is not None:
                return (
                    against_key.symbol == symbol
                    and against_trader_address == trader_address
                )
            elif symbol is not None:
                return against_key.symbol == symbol
            return True

        if all(
            map(
                lambda x: x is not None,
                [symbol, order_hash, trader_address, strategy_id_hash],
            )
        ):
            # If the topic is maximally set, we have a specific leaf
            # we are querying, and can retrieve it from the SMT
            # accordingly
            book_order_key: BookOrderKey = BookOrderKey(symbol, order_hash)
            # Return a snapshot with a single BookOrder leaf item
            return [
                {
                    "t": topic_string(book_order_key, trader_address, strategy_id_hash),
                    "c": self.smt.book_order(book_order_key),
                }
            ]

        # Return a snapshot containing the BookOrder leaves obtained
        return [
            {
                "t": topic_string(
                    book_order_key,
                    book_order.trader_address,
                    book_order.strategy_id_hash,
                ),
                "c": book_order,
            }
            for book_order_key, book_order in self.smt.all_book_orders()
            if encompasses_key(
                book_order_key,
                book_order.trader_address,
                book_order.strategy_id_hash,
            )
        ]

    def get_insurance_fund_snapshot(self) -> list[dict]:
        """
        Get a snapshot of the organic InsuranceFund leaf.
        """

        # Return a snapshot containing the organic InsuranceFund
        return [
            {
                "t": "STATE/INSURANCE_FUND/",
                "c": self.smt.insurance_fund(InsuranceFundKey()),
            }
        ]

    # ************** WEBSOCKET FUNCTIONALITY ************** #

    async def _handle_tx_log_update_message(self, message: dict) -> None:
        """
        Handle a transaction log message received from the Trader
        API upon subscription. This will be either a Snapshot message
        (which includes the state snapshot SMT data as of the most
        recent checkpoint) or a streaming Head/Tail message (an
        individual transaction log entry from that point onwards).
        These messages are parsed to get things into the same format
        such that the Auditor can be used as-is by the integration
        tests as well.

        Parameters
        ----------
        message : dict
            Transaction log update message
        """

        if message["t"] == WebsocketEventType.SNAPSHOT:
            # If the transaction log message is of type Snapshot, we
            # need to process the snapshot of state leaves as of the
            # most recent checkpoint

            # Extract the state snapshot leaves, which is the state
            # snapshot as of the most recent completed checkpoint
            # at the time of subscribing to the transaction log
            parsed_state_snapshot = message["c"]["leaves"]

            # Process the state snapshot
            self.process_state_snapshot(
                int(message["c"]["epochId"]),
                parsed_state_snapshot,
            )

            # Mark that we've received a snapshot
            self._snapshot_received = True

        else:
            # Parse the transaction log entries suitable for the
            # Auditor such that it can be reused as-is by the
            # integration tests
            parsed_tx_log_entry = get_parsed_tx_log_entry(message["c"])

            if self.first_head:
                # If this is the first tx log entry of the head response

                # Check if we're in epoch < 2 and haven't received a snapshot yet
                if not self._snapshot_received:
                    logger.warning(
                        f"Received Head message in epoch {parsed_tx_log_entry['epochId']} without snapshot. Restarting connection to wait for epoch >= 2..."
                    )
                    raise RuntimeError(
                        f"No snapshot available in epoch {parsed_tx_log_entry['epochId']} < 2, restarting..."
                    )

                # Initialize the current local state root hash to the SMT's root
                # hash after having loaded the state snapshot
                self.current_state_root_hash = f"0x{self.smt.root().as_bytes().hex()}"
                self.current_batch_state_root_hash = self.current_state_root_hash

                self.latest_batch_id = parsed_tx_log_entry["batchId"]

                self.first_head = False

            # Process the transaction log entries
            self.process_tx_log_event(
                parsed_tx_log_entry, message["t"] == WebsocketEventType.HEAD
            )

    async def api_auditor_consumer_handler(
        self, websocket: WebSocketClientProtocol, path: str
    ):
        """
        API <> Auditor consumer handler for messages that are received
        by the Auditor from the API.

        Parameters
        ----------
        websocket : WebSocketClientProtocol
            The WS connection instance between API and Auditor
        """

        async def _inner_messages(
            ws: websockets.WebSocketClientProtocol,
        ) -> AsyncIterable[str]:
            try:
                while True:
                    try:
                        msg: str = await asyncio.wait_for(ws.recv(), timeout=30.0)
                        yield msg
                    except asyncio.TimeoutError:
                        try:
                            pong_waiter = await ws.ping()
                            await asyncio.wait_for(pong_waiter, timeout=30.0)
                        except asyncio.TimeoutError:
                            raise
            except asyncio.TimeoutError:
                print("WebSocket ping timed out. Going to reconnect...")
                return
            except websockets.ConnectionClosed:
                return
            finally:
                await ws.close()

        # Loop through messages as they come in on the WebSocket
        async for message in _inner_messages(websocket):
            # JSON-decode the inbound message
            data = json.loads(message)

            if "t" not in data:
                # Non-topical data, such as a rate-limiting message
                continue

            topic = data["t"]

            if topic in ["Snapshot", "Head", "Tail"]:
                # If the message is a TxLogUpdate, this is something
                # that should be processed by the Auditor

                # Handle transaction log message
                await self._handle_tx_log_update_message(data)

    async def api_auditor_producer_handler(
        self, websocket: WebSocketClientProtocol, path: str
    ):
        """
        API <> Auditor producer handler for messages that are sent
        from the Auditor to the API.

        Parameters
        ----------
        websocket : WebSocketClientProtocol
            The WS connection instance between API and Auditor
        """

        # Start things off with a subscription to the TxLogUpdate
        # channel on the API to receive a snapshot and streaming
        # updates to the transaction log
        tx_log_update_subscription = WebsocketMessage(
            "SubscribeMarket", {"events": ["TxLogUpdate"]}
        )
        self.api_auditor_queue.put_nowait(tx_log_update_subscription)

        try:
            while True:
                # Receive the oldest message (FIFO) in the queue and
                # send after serialization to the API
                message = await self.api_auditor_queue.get()
                await websocket.send(ComplexOutputEncoder().encode(message))
        except websockets.ConnectionClosed:
            print("Connection has been closed (api_auditor_producer_handler)")

    async def api_auditor_server(self):
        """
        Sets up the DerivaDEX API <> Auditor server with consumer and
        producer tasks. The consumer is when the Auditor receives
        messages from the API, and the producer is when the Auditor
        sends messages to the API.
        """

        def _generate_uri_token():
            """
            Generate URI token to connect to the API
            """

            # Construct and return the WS connection URL
            return f"{self.webserver_url.replace('http', 'ws', 1)}/v2/txlog"

        while True:
            try:
                # set up a WS context connection given a specific URI
                async with websockets.connect(
                    _generate_uri_token(),
                    max_size=2**32,
                    ping_timeout=None,
                ) as websocket_client:
                    try:
                        # set up the consumer
                        consumer_task = asyncio.ensure_future(
                            self.api_auditor_consumer_handler(websocket_client, None)
                        )

                        # set up the producer
                        """
                        producer_task = asyncio.ensure_future(
                            self.api_auditor_producer_handler(websocket_client, None)
                        )
                        """

                        # These should essentially run forever unless one of them
                        # is stopped for some reason
                        done, pending = await asyncio.wait(
                            [consumer_task],
                            return_when=asyncio.FIRST_COMPLETED,
                        )
                        for task in pending:
                            task.cancel()
                    finally:
                        logger.info("API <> Auditor server outer loop restarting")

                        await asyncio.sleep(30.0)

                        self._reset()
                        continue

            except asyncio.CancelledError:
                raise
            except Exception as e:
                print(
                    f"Unexpected error with WebSocket connection: {e}. Retrying after 30 seconds...",
                )
                await asyncio.sleep(30.0)

                # Reset Auditor state upon reconnection
                self._reset()

    def shutdown_operators(self):
        node_urls = [
            url.strip("/")
            for url in requests.get(f"{self.webserver_url}/v2/status")
            .json()["raftMetrics"]["nodes"]
            .values()
        ]

        for url in node_urls:
            r = requests.get(f"{url}/v2/shutdown")
            logger.info(f"shutting down operator node at {url} succeeded: {r.ok}")

    # ************** ASYNCIO ENTRYPOINT ************** #

    async def main(self):
        """
        Main entry point for the Auditor. It sets up the various
        coroutines to run on the event loop - API <> auditor WS server.
        """

        # Initialize parameters inside event loop
        self._reset()

        await asyncio.gather(self.api_auditor_server())
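
For readers skimming the auditor logic above, the standalone snippet below illustrates the ordering rule that process_tx_log_event applies: entries are buffered by (epochId, txOrdinal) and drained only while the next expected ordinal is present, so out-of-order delivery never reaches the SMT out of order. It is illustrative only, using plain dictionaries and invented sample entries rather than the package's types, and it does not track epoch advancement.

from collections import defaultdict

pending = defaultdict(dict)  # epochId -> {txOrdinal: entry}
expected_epoch = 0
expected_ordinal = 0
applied = []


def receive(entry: dict) -> None:
    """Buffer an entry, then drain everything that is now in order."""
    global expected_ordinal
    pending[entry["epochId"]][entry["txOrdinal"]] = entry
    while expected_epoch in pending and expected_ordinal in pending[expected_epoch]:
        applied.append(pending[expected_epoch].pop(expected_ordinal))
        expected_ordinal += 1


# Entries arrive out of order but are applied in ordinal order.
receive({"epochId": 0, "txOrdinal": 1, "event": "CompleteFill"})
receive({"epochId": 0, "txOrdinal": 0, "event": "Post"})
assert [e["txOrdinal"] for e in applied] == [0, 1]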