csm_dashboard-0.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- csm_dashboard-0.2.0.dist-info/METADATA +354 -0
- csm_dashboard-0.2.0.dist-info/RECORD +27 -0
- csm_dashboard-0.2.0.dist-info/WHEEL +4 -0
- csm_dashboard-0.2.0.dist-info/entry_points.txt +2 -0
- src/__init__.py +1 -0
- src/cli/__init__.py +1 -0
- src/cli/commands.py +624 -0
- src/core/__init__.py +1 -0
- src/core/config.py +42 -0
- src/core/contracts.py +19 -0
- src/core/types.py +153 -0
- src/data/__init__.py +1 -0
- src/data/beacon.py +370 -0
- src/data/cache.py +67 -0
- src/data/etherscan.py +78 -0
- src/data/ipfs_logs.py +267 -0
- src/data/known_cids.py +30 -0
- src/data/lido_api.py +35 -0
- src/data/onchain.py +258 -0
- src/data/rewards_tree.py +58 -0
- src/data/strikes.py +214 -0
- src/main.py +39 -0
- src/services/__init__.py +1 -0
- src/services/operator_service.py +320 -0
- src/web/__init__.py +1 -0
- src/web/app.py +576 -0
- src/web/routes.py +161 -0
src/data/ipfs_logs.py
ADDED
@@ -0,0 +1,267 @@
"""IPFS distribution log fetching with persistent caching."""

import json
import time
from dataclasses import dataclass
from decimal import Decimal
from pathlib import Path

import httpx

from ..core.config import get_settings


@dataclass
class FrameData:
    """Data from a single distribution frame."""

    start_epoch: int
    end_epoch: int
    log_cid: str
    block_number: int
    distributed_rewards: int  # For specific operator, in wei


class IPFSLogProvider:
    """Fetches and caches historical distribution logs from IPFS."""

    # IPFS gateways to try in order
    GATEWAYS = [
        "https://dweb.link/ipfs/",
        "https://ipfs.io/ipfs/",
        "https://cloudflare-ipfs.com/ipfs/",
    ]

    # Rate limiting: minimum seconds between gateway requests
    MIN_REQUEST_INTERVAL = 1.0

    def __init__(self, cache_dir: Path | None = None):
        self.settings = get_settings()
        self.cache_dir = cache_dir or Path.home() / ".cache" / "csm-dashboard" / "ipfs"
        self.cache_dir.mkdir(parents=True, exist_ok=True)
        self._last_request_time = 0.0

    def _get_cache_path(self, cid: str) -> Path:
        """Get the cache file path for a CID."""
        return self.cache_dir / f"{cid}.json"

    def _load_from_cache(self, cid: str) -> dict | None:
        """Load log data from local cache if available."""
        cache_path = self._get_cache_path(cid)
        if cache_path.exists():
            try:
                with open(cache_path) as f:
                    return json.load(f)
            except (json.JSONDecodeError, OSError):
                # Corrupted cache, remove it
                cache_path.unlink(missing_ok=True)
        return None

    def _save_to_cache(self, cid: str, data: dict) -> None:
        """Save log data to local cache."""
        cache_path = self._get_cache_path(cid)
        try:
            with open(cache_path, "w") as f:
                json.dump(data, f)
        except OSError:
            pass  # Cache write failure is non-fatal

    def _rate_limit(self) -> None:
        """Ensure minimum interval between IPFS gateway requests."""
        now = time.time()
        elapsed = now - self._last_request_time
        if elapsed < self.MIN_REQUEST_INTERVAL:
            time.sleep(self.MIN_REQUEST_INTERVAL - elapsed)
        self._last_request_time = time.time()

    async def fetch_log(self, cid: str) -> dict | None:
        """
        Fetch and parse a distribution log from IPFS.

        Checks local cache first, then tries IPFS gateways.
        Returns None if fetch fails.
        """
        # Check cache first
        cached = self._load_from_cache(cid)
        if cached is not None:
            return cached

        # Rate limit gateway requests
        self._rate_limit()

        # Try each gateway
        async with httpx.AsyncClient(timeout=30.0, follow_redirects=True) as client:
            for gateway in self.GATEWAYS:
                try:
                    url = f"{gateway}{cid}"
                    response = await client.get(url)
                    if response.status_code == 200:
                        data = response.json()
                        # The IPFS log is wrapped in a list, unwrap it
                        if isinstance(data, list) and len(data) == 1:
                            data = data[0]
                        # Cache the successful result
                        self._save_to_cache(cid, data)
                        return data
                except Exception:
                    continue  # Try next gateway

        return None

    def get_operator_frame_rewards(self, log_data: dict, operator_id: int) -> int | None:
        """
        Extract operator's distributed_rewards for a frame.

        Returns rewards in wei (shares), or None if operator not in frame.
        """
        operators = log_data.get("operators", {})
        op_key = str(operator_id)

        if op_key not in operators:
            return None

        return operators[op_key].get("distributed_rewards", 0)

    def get_frame_info(self, log_data: dict) -> tuple[int, int]:
        """
        Extract frame epoch range from log data.

        Returns (start_epoch, end_epoch).
        """
        frame = log_data.get("frame", [0, 0])
        if not isinstance(frame, list) or len(frame) < 2:
            return (0, 0)
        return (frame[0], frame[1])

    async def get_operator_history(
        self,
        operator_id: int,
        log_cids: list[dict],  # List of {block, logCid} from events
    ) -> list[FrameData]:
        """
        Fetch all historical frame data for an operator.

        Args:
            operator_id: The operator ID to look up
            log_cids: List of {block, logCid} dicts from DistributionLogUpdated events

        Returns:
            List of FrameData objects, sorted by epoch (oldest first)
        """
        frames = []

        for entry in log_cids:
            cid = entry["logCid"]
            block = entry["block"]

            log_data = await self.fetch_log(cid)
            if log_data is None:
                continue

            rewards = self.get_operator_frame_rewards(log_data, operator_id)
            if rewards is None:
                # Operator not in this frame (may have joined later)
                continue

            start_epoch, end_epoch = self.get_frame_info(log_data)

            frames.append(
                FrameData(
                    start_epoch=start_epoch,
                    end_epoch=end_epoch,
                    log_cid=cid,
                    block_number=block,
                    distributed_rewards=rewards,
                )
            )

        # Sort by epoch (oldest first)
        frames.sort(key=lambda f: f.start_epoch)
        return frames

    def calculate_frame_duration_days(self, frame: FrameData) -> float:
        """Calculate the duration of a frame in days."""
        # Each epoch is 6.4 minutes (384 seconds = 32 slots * 12 seconds)
        epochs = frame.end_epoch - frame.start_epoch
        minutes = epochs * 6.4
        return minutes / (60 * 24)

    def calculate_historical_apy(
        self,
        frames: list[FrameData],
        bond_eth: Decimal,
        periods: list[int] | None = None,
    ) -> dict[str, float | None]:
        """
        Calculate APY from historical frame data.

        Args:
            frames: List of FrameData objects (oldest first)
            bond_eth: Current bond in ETH (used for all periods)
            periods: List of day counts to calculate APY for (default: [28, None] for 28d and LTD)

        Returns:
            Dict mapping period name to APY percentage (e.g., {"28d": 3.92, "ltd": 4.10})

        Note:
            For lifetime APY, only frames with non-zero rewards are included. This avoids
            artificially low APY for operators who had a ramp-up period with no rewards.
        """
        if periods is None:
            periods = [28, None]  # 28-day and lifetime

        if not frames or bond_eth <= 0:
            return {self._period_name(p): None for p in periods}

        results = {}

        for period in periods:
            if period is None:
                # Lifetime: only frames where operator earned rewards
                # This avoids ramp-up periods with 0 rewards skewing the APY
                selected_frames = [f for f in frames if f.distributed_rewards > 0]
            else:
                # Select frames within the period
                # Work backwards from most recent frame
                total_days = 0.0
                selected_frames = []
                for frame in reversed(frames):
                    frame_days = self.calculate_frame_duration_days(frame)
                    if total_days + frame_days <= period * 1.5:  # Allow some buffer
                        selected_frames.insert(0, frame)
                        total_days += frame_days
                    if total_days >= period:
                        break

            if not selected_frames:
                results[self._period_name(period)] = None
                continue

            # Sum rewards and calculate total days
            total_rewards_wei = sum(f.distributed_rewards for f in selected_frames)
            total_days = sum(self.calculate_frame_duration_days(f) for f in selected_frames)

            if total_days <= 0:
                results[self._period_name(period)] = None
                continue

            # Convert rewards to ETH
            total_rewards_eth = Decimal(total_rewards_wei) / Decimal(10**18)

            # Annualize: (rewards / bond) * (365 / days) * 100
            apy = float(total_rewards_eth / bond_eth) * (365.0 / total_days) * 100

            results[self._period_name(period)] = round(apy, 2)

        return results

    def _period_name(self, period: int | None) -> str:
        """Convert period days to display name."""
        if period is None:
            return "ltd"
        return f"{period}d"

    def clear_cache(self) -> None:
        """Clear all cached IPFS logs."""
        for cache_file in self.cache_dir.glob("*.json"):
            cache_file.unlink(missing_ok=True)
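The annualization in calculate_historical_apy is easiest to sanity-check with concrete numbers. The figures below are illustrative and not taken from the package:

# Worked example of the APY formula above, with assumed numbers.
epochs = 6300                       # one frame of 6300 epochs (assumed)
days = epochs * 6.4 / (60 * 24)     # 6.4 min per epoch -> 28.0 days
rewards_eth = 0.02                  # assumed rewards earned over the frame
bond_eth = 2.0                      # assumed operator bond
apy = (rewards_eth / bond_eth) * (365.0 / days) * 100
print(round(apy, 2))                # 13.04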
src/data/known_cids.py
ADDED
@@ -0,0 +1,30 @@
"""Known historical distribution log CIDs as fallback.

These are manually curated from on-chain DistributionLogUpdated events.
Used when neither Etherscan API nor RPC event queries are available.

To update this list:
1. Query events from Etherscan or a full node
2. Add new entries in chronological order (oldest first)

Last updated: December 2025
"""

# Format: {"block": block_number, "logCid": "Qm..."}
KNOWN_DISTRIBUTION_LOGS: list[dict] = [
    # CSM launched Dec 2024, first distribution frame ~Jan 2025
    {"block": 21277898, "logCid": "QmezkGCHPUJ9XSAJfibmo6Sup35VgbhnodfYsc1xNT3rbo"},
    {"block": 21445874, "logCid": "Qmb5CZUD9uLXP9LS68jnJp1v2GTF1KjYsNLJuML9fpRufE"},
    {"block": 21644860, "logCid": "QmePUqG8tMXbv3eHDu3j56Dod4gwmGh1Vapsh7u4gxotT4"},
    {"block": 21859279, "logCid": "QmT5JWn3sR7fYxxxSh3kHBmjZyPBWjKb6CsSLGXMQbLXMX"},
    {"block": 22047254, "logCid": "QmWxANi2GWvoxwnPRsxwZNF6NRyjwMBPAF4bBMcL3HGG3i"},
    {"block": 22247841, "logCid": "QmYQPDuqVbxWq2YNSZS55LE3eTeriy51HTCgBHLiW9fN7N"},
    {"block": 22448016, "logCid": "QmeZduNqrnSMLTVE5tkNDv2WhtL3uAgHRP1915m5CHcCqM"},
    {"block": 22646060, "logCid": "QmaHU6Ah99Yk6kQVtSrN4inxqqYoU6epZ5UKyDvwdYUKAS"},
    {"block": 22847998, "logCid": "Qmemm9gD2fQgwNziBsf9mAaveNXJ3eJvHpqBTWKoLdUXXV"},
    {"block": 23048383, "logCid": "QmVgGQS7QBeRMq2noqqxekY5ezmqRsgu7JjiyMyRaaWEDv"},
    {"block": 23248929, "logCid": "QmaUC2HBv88mJ9Gf99hfNgtH4qo2F1yHaBMC4imwVhxDDi"},
    {"block": 23463926, "logCid": "QmPPFkydgtnwMBDF6nZZaU5nnqy3csbKts3UfRRgWXreEu"},
    {"block": 23649468, "logCid": "QmSdx8WFnaeMWLKURBYgMMiixZ9z4xn3mvGPBeuRzPt6MQ"},
    {"block": 23849500, "logCid": "QmZyzTYdSait7BYCEToFJFJ6qVkX2HJBrrvXhk64e82xoK"},
]
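As a usage sketch (illustrative only, not shipped in the package), the list above can be fed directly into IPFSLogProvider.get_operator_history from src/data/ipfs_logs.py. The operator ID and bond below are hypothetical, and the sketch assumes the wheel's "src" package is importable and get_settings() can resolve its configuration:

# Illustrative sketch, not part of the package.
import asyncio
from decimal import Decimal

from src.data.ipfs_logs import IPFSLogProvider
from src.data.known_cids import KNOWN_DISTRIBUTION_LOGS


async def main() -> None:
    provider = IPFSLogProvider()
    # Hypothetical operator ID; KNOWN_DISTRIBUTION_LOGS supplies the {block, logCid} entries.
    frames = await provider.get_operator_history(operator_id=42, log_cids=KNOWN_DISTRIBUTION_LOGS)
    # Hypothetical 2 ETH bond; returns e.g. {"28d": ..., "ltd": ...}
    print(provider.calculate_historical_apy(frames, bond_eth=Decimal("2")))


asyncio.run(main())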
src/data/lido_api.py
ADDED
@@ -0,0 +1,35 @@
"""Lido protocol API for stETH APR and other metrics."""

import httpx

from .cache import cached

LIDO_API_BASE = "https://eth-api.lido.fi/v1"


class LidoAPIProvider:
    """Fetches data from Lido's public API."""

    @cached(ttl=3600)  # Cache for 1 hour
    async def get_steth_apr(self) -> dict:
        """
        Get current stETH APR from Lido API.

        Returns 7-day SMA (simple moving average) APR.
        """
        async with httpx.AsyncClient(timeout=10.0) as client:
            try:
                response = await client.get(
                    f"{LIDO_API_BASE}/protocol/steth/apr/sma"
                )

                if response.status_code == 200:
                    data = response.json()
                    return {
                        "apr": float(data.get("data", {}).get("smaApr", 0)),
                        "timestamp": data.get("data", {}).get("timeUnix"),
                    }
            except Exception:
                pass

        return {"apr": None, "timestamp": None}
src/data/onchain.py
ADDED
@@ -0,0 +1,258 @@
"""On-chain data fetching via Web3."""

import asyncio
from decimal import Decimal

from web3 import Web3

from ..core.config import get_settings
from ..core.contracts import (
    CSACCOUNTING_ABI,
    CSFEEDISTRIBUTOR_ABI,
    CSMODULE_ABI,
    STETH_ABI,
)
from ..core.types import BondSummary, NodeOperator
from .cache import cached
from .etherscan import EtherscanProvider
from .known_cids import KNOWN_DISTRIBUTION_LOGS


class OnChainDataProvider:
    """Fetches data from Ethereum contracts."""

    def __init__(self, rpc_url: str | None = None):
        self.settings = get_settings()
        self.w3 = Web3(Web3.HTTPProvider(rpc_url or self.settings.eth_rpc_url))

        # Initialize contracts
        self.csmodule = self.w3.eth.contract(
            address=Web3.to_checksum_address(self.settings.csmodule_address),
            abi=CSMODULE_ABI,
        )
        self.csaccounting = self.w3.eth.contract(
            address=Web3.to_checksum_address(self.settings.csaccounting_address),
            abi=CSACCOUNTING_ABI,
        )
        self.csfeedistributor = self.w3.eth.contract(
            address=Web3.to_checksum_address(self.settings.csfeedistributor_address),
            abi=CSFEEDISTRIBUTOR_ABI,
        )
        self.steth = self.w3.eth.contract(
            address=Web3.to_checksum_address(self.settings.steth_address),
            abi=STETH_ABI,
        )

    @cached(ttl=60)
    async def get_node_operators_count(self) -> int:
        """Get total number of node operators."""
        return self.csmodule.functions.getNodeOperatorsCount().call()

    @cached(ttl=300)
    async def get_node_operator(self, operator_id: int) -> NodeOperator:
        """Get node operator data by ID."""
        data = self.csmodule.functions.getNodeOperator(operator_id).call()
        return NodeOperator(
            node_operator_id=operator_id,
            total_added_keys=data[0],
            total_withdrawn_keys=data[1],
            total_deposited_keys=data[2],
            total_vetted_keys=data[3],
            stuck_validators_count=data[4],
            depositable_validators_count=data[5],
            target_limit=data[6],
            target_limit_mode=data[7],
            total_exited_keys=data[8],
            enqueued_count=data[9],
            manager_address=data[10],
            proposed_manager_address=data[11],
            reward_address=data[12],
            proposed_reward_address=data[13],
            extended_manager_permissions=data[14],
        )

    async def find_operator_by_address(self, address: str) -> int | None:
        """
        Find operator ID by manager or reward address.

        Tries batch requests first (faster if RPC supports JSON-RPC batching).
        Falls back to sequential calls with rate limiting if batch fails.
        """
        address = Web3.to_checksum_address(address)
        total = await self.get_node_operators_count()

        # Try batch requests first (not all RPCs support this)
        batch_size = 50
        batch_supported = True

        for start in range(0, total, batch_size):
            end = min(start + batch_size, total)

            if batch_supported:
                try:
                    with self.w3.batch_requests() as batch:
                        for op_id in range(start, end):
                            batch.add(self.csmodule.functions.getNodeOperator(op_id))
                        results = batch.execute()

                    for i, data in enumerate(results):
                        op_id = start + i
                        manager = data[10]
                        reward = data[12]
                        if manager.lower() == address.lower() or reward.lower() == address.lower():
                            return op_id
                    continue  # Batch succeeded, move to next batch
                except Exception:
                    # Batch not supported by this RPC, fall back to sequential
                    batch_supported = False

            # Sequential fallback with rate limiting
            for op_id in range(start, end):
                try:
                    data = self.csmodule.functions.getNodeOperator(op_id).call()
                    manager = data[10]
                    reward = data[12]
                    if manager.lower() == address.lower() or reward.lower() == address.lower():
                        return op_id
                    # Small delay to avoid rate limiting on public RPCs
                    await asyncio.sleep(0.05)
                except Exception:
                    await asyncio.sleep(0.1)  # Longer delay on error
                    continue

        return None

    @cached(ttl=60)
    async def get_bond_summary(self, operator_id: int) -> BondSummary:
        """Get bond summary for an operator."""
        current, required = self.csaccounting.functions.getBondSummary(
            operator_id
        ).call()

        current_eth = Decimal(current) / Decimal(10**18)
        required_eth = Decimal(required) / Decimal(10**18)
        excess_eth = max(Decimal(0), current_eth - required_eth)

        return BondSummary(
            current_bond_wei=current,
            required_bond_wei=required,
            current_bond_eth=current_eth,
            required_bond_eth=required_eth,
            excess_bond_eth=excess_eth,
        )

    @cached(ttl=60)
    async def get_distributed_shares(self, operator_id: int) -> int:
        """Get already distributed (claimed) shares for operator."""
        return self.csfeedistributor.functions.distributedShares(operator_id).call()

    @cached(ttl=60)
    async def shares_to_eth(self, shares: int) -> Decimal:
        """Convert stETH shares to ETH value."""
        if shares == 0:
            return Decimal(0)
        eth_wei = self.steth.functions.getPooledEthByShares(shares).call()
        return Decimal(eth_wei) / Decimal(10**18)

    async def get_signing_keys(
        self, operator_id: int, start: int = 0, count: int = 100
    ) -> list[str]:
        """Get validator pubkeys for an operator.

        Fetches in batches of 100 to avoid RPC limits on large operators.
        """
        keys = []
        batch_size = 100

        for batch_start in range(start, start + count, batch_size):
            batch_count = min(batch_size, start + count - batch_start)
            keys_bytes = self.csmodule.functions.getSigningKeys(
                operator_id, batch_start, batch_count
            ).call()
            # Each key is 48 bytes
            for i in range(0, len(keys_bytes), 48):
                key = "0x" + keys_bytes[i : i + 48].hex()
                keys.append(key)

        return keys

    def get_current_log_cid(self) -> str:
        """Get the current distribution log CID from the contract."""
        return self.csfeedistributor.functions.logCid().call()

    @cached(ttl=3600)  # Cache for 1 hour since historical events don't change
    async def get_distribution_log_history(
        self, start_block: int | None = None
    ) -> list[dict]:
        """
        Query DistributionLogUpdated events to get historical logCids.

        Tries multiple methods in order:
        1. Etherscan API (if API key configured) - most reliable
        2. Chunked RPC queries (10k block chunks) - works on some RPCs
        3. Hardcoded known CIDs - fallback for users without API keys
        4. Current logCid from contract - ultimate fallback

        Args:
            start_block: Starting block number (default: CSM deployment ~20873000)

        Returns:
            List of {block, logCid} dicts, sorted by block number (oldest first)
        """
        # CSM was deployed around block 20873000 (Dec 2024)
        if start_block is None:
            start_block = 20873000

        # 1. Try Etherscan API first (most reliable)
        etherscan = EtherscanProvider()
        if etherscan.is_available():
            events = await etherscan.get_distribution_log_events(
                self.settings.csfeedistributor_address,
                start_block,
            )
            if events:
                return events

        # 2. Try chunked RPC queries
        events = await self._query_events_chunked(start_block)
        if events:
            return events

        # 3. Use known historical CIDs as fallback
        if KNOWN_DISTRIBUTION_LOGS:
            return KNOWN_DISTRIBUTION_LOGS

        # 4. Ultimate fallback: current logCid only
        try:
            current_cid = self.get_current_log_cid()
            if current_cid:
                current_block = self.w3.eth.block_number
                return [{"block": current_block, "logCid": current_cid}]
        except Exception:
            pass

        return []

    async def _query_events_chunked(
        self, start_block: int, chunk_size: int = 10000
    ) -> list[dict]:
        """Query events in smaller chunks to work around RPC limitations."""
        current_block = self.w3.eth.block_number
        all_events = []

        for from_block in range(start_block, current_block, chunk_size):
            to_block = min(from_block + chunk_size - 1, current_block)
            try:
                events = self.csfeedistributor.events.DistributionLogUpdated.get_logs(
                    from_block=from_block,
                    to_block=to_block,
                )
                for e in events:
                    all_events.append(
                        {"block": e["blockNumber"], "logCid": e["args"]["logCid"]}
                    )
            except Exception:
                # If chunked queries fail, give up on this method
                return []

        return sorted(all_events, key=lambda x: x["block"])
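The fallback chain above also gives a straightforward way to regenerate the hardcoded list in src/data/known_cids.py. A minimal sketch, not part of the package, assuming a working RPC endpoint (and optionally an Etherscan key) is configured via get_settings():

# Illustrative only; prints {block, logCid} entries in the same shape used by known_cids.py.
import asyncio

from src.data.onchain import OnChainDataProvider


async def main() -> None:
    provider = OnChainDataProvider()
    events = await provider.get_distribution_log_history()
    for e in events:
        print(f'{{"block": {e["block"]}, "logCid": "{e["logCid"]}"}},')


asyncio.run(main())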
src/data/rewards_tree.py
ADDED
@@ -0,0 +1,58 @@
"""Fetch and parse the rewards merkle tree from GitHub."""

import httpx

from ..core.config import get_settings
from ..core.types import RewardsInfo
from .cache import cached


class RewardsTreeProvider:
    """Fetches rewards data from the csm-rewards repository."""

    def __init__(self):
        self.settings = get_settings()

    @cached(ttl=3600)  # Cache for 1 hour since tree updates infrequently
    async def fetch_rewards_data(self) -> dict:
        """
        Fetch the proofs.json file which contains:
        {
            "CSM Operator 0": {
                "cumulativeFeeShares": 304687403773285400,
                "proof": ["0x...", "0x...", ...]
            },
            ...
        }
        """
        async with httpx.AsyncClient(timeout=30.0) as client:
            response = await client.get(self.settings.rewards_proofs_url)
            response.raise_for_status()
            return response.json()

    async def get_operator_rewards(self, operator_id: int) -> RewardsInfo | None:
        """Get rewards info for a specific operator."""
        data = await self.fetch_rewards_data()
        key = f"CSM Operator {operator_id}"

        if key not in data:
            return None

        entry = data[key]
        return RewardsInfo(
            cumulative_fee_shares=entry["cumulativeFeeShares"],
            proof=entry["proof"],
        )

    async def get_all_operators_with_rewards(self) -> list[int]:
        """Get list of all operator IDs that have rewards."""
        data = await self.fetch_rewards_data()
        operator_ids = []
        for key in data.keys():
            if key.startswith("CSM Operator "):
                try:
                    op_id = int(key.replace("CSM Operator ", ""))
                    operator_ids.append(op_id)
                except ValueError:
                    continue
        return sorted(operator_ids)
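A final illustrative sketch, not shipped in the package: pairing the tree's cumulativeFeeShares with distributedShares from CSFeeDistributor gives an estimate of what an operator could still claim. Treating the difference as claimable is an assumption here, not logic taken from this diff, and the operator ID is hypothetical:

# Illustrative sketch; assumes the wheel's "src" package is importable and configured.
import asyncio

from src.data.onchain import OnChainDataProvider
from src.data.rewards_tree import RewardsTreeProvider


async def main() -> None:
    operator_id = 42  # hypothetical operator ID
    onchain = OnChainDataProvider()
    tree = RewardsTreeProvider()

    rewards = await tree.get_operator_rewards(operator_id)
    if rewards is None:
        print("operator not in the rewards tree")
        return

    claimed = await onchain.get_distributed_shares(operator_id)
    unclaimed_shares = rewards.cumulative_fee_shares - claimed  # assumed relationship
    unclaimed_eth = await onchain.shares_to_eth(unclaimed_shares)
    print(f"unclaimed: {unclaimed_eth} ETH ({unclaimed_shares} shares)")


asyncio.run(main())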