csm-dashboard 0.2.2__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {csm_dashboard-0.2.2.dist-info → csm_dashboard-0.3.1.dist-info}/METADATA +90 -25
- csm_dashboard-0.3.1.dist-info/RECORD +32 -0
- src/abis/CSAccounting.json +22 -0
- src/abis/stETH.json +10 -0
- src/cli/commands.py +266 -45
- src/core/config.py +3 -0
- src/core/types.py +77 -5
- src/data/etherscan.py +60 -0
- src/data/ipfs_logs.py +42 -2
- src/data/lido_api.py +105 -0
- src/data/onchain.py +191 -0
- src/data/strikes.py +40 -7
- src/services/operator_service.py +352 -34
- src/web/app.py +10 -7
- src/web/routes.py +77 -11
- csm_dashboard-0.2.2.dist-info/RECORD +0 -32
- {csm_dashboard-0.2.2.dist-info → csm_dashboard-0.3.1.dist-info}/WHEEL +0 -0
- {csm_dashboard-0.2.2.dist-info → csm_dashboard-0.3.1.dist-info}/entry_points.txt +0 -0
src/data/ipfs_logs.py
CHANGED
|
@@ -3,6 +3,7 @@
|
|
|
3
3
|
import json
|
|
4
4
|
import time
|
|
5
5
|
from dataclasses import dataclass
|
|
6
|
+
from datetime import datetime, timezone
|
|
6
7
|
from decimal import Decimal
|
|
7
8
|
from pathlib import Path
|
|
8
9
|
|
|
@@ -11,6 +12,19 @@ import httpx
|
|
|
11
12
|
from ..core.config import get_settings
|
|
12
13
|
|
|
13
14
|
|
|
15
|
+
# Ethereum Beacon Chain genesis timestamp (Dec 1, 2020 12:00:23 UTC)
|
|
16
|
+
BEACON_GENESIS = 1606824023
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def epoch_to_datetime(epoch: int) -> datetime:
|
|
20
|
+
"""Convert beacon chain epoch to datetime.
|
|
21
|
+
|
|
22
|
+
Each epoch is 32 slots * 12 seconds = 384 seconds.
|
|
23
|
+
"""
|
|
24
|
+
timestamp = BEACON_GENESIS + (epoch * 384)
|
|
25
|
+
return datetime.fromtimestamp(timestamp, tz=timezone.utc)
|
|
26
|
+
|
|
27
|
+
|
|
14
28
|
@dataclass
|
|
15
29
|
class FrameData:
|
|
16
30
|
"""Data from a single distribution frame."""
|
|
@@ -20,6 +34,7 @@ class FrameData:
|
|
|
20
34
|
log_cid: str
|
|
21
35
|
block_number: int
|
|
22
36
|
distributed_rewards: int # For specific operator, in wei
|
|
37
|
+
validator_count: int # Number of validators for operator in this frame
|
|
23
38
|
|
|
24
39
|
|
|
25
40
|
class IPFSLogProvider:
|
|
@@ -27,7 +42,6 @@ class IPFSLogProvider:
|
|
|
27
42
|
|
|
28
43
|
# IPFS gateways to try in order
|
|
29
44
|
GATEWAYS = [
|
|
30
|
-
"https://dweb.link/ipfs/",
|
|
31
45
|
"https://ipfs.io/ipfs/",
|
|
32
46
|
"https://cloudflare-ipfs.com/ipfs/",
|
|
33
47
|
]
|
|
@@ -113,6 +127,9 @@ class IPFSLogProvider:
|
|
|
113
127
|
Extract operator's distributed_rewards for a frame.
|
|
114
128
|
|
|
115
129
|
Returns rewards in wei (shares), or None if operator not in frame.
|
|
130
|
+
|
|
131
|
+
Note: The IPFS log field name changed from "distributed" to "distributed_rewards"
|
|
132
|
+
around Dec 2025. We check both for backwards compatibility.
|
|
116
133
|
"""
|
|
117
134
|
operators = log_data.get("operators", {})
|
|
118
135
|
op_key = str(operator_id)
|
|
@@ -120,7 +137,12 @@ class IPFSLogProvider:
|
|
|
120
137
|
if op_key not in operators:
|
|
121
138
|
return None
|
|
122
139
|
|
|
123
|
-
|
|
140
|
+
op_data = operators[op_key]
|
|
141
|
+
# Handle both new and old field names for backwards compatibility
|
|
142
|
+
rewards = op_data.get("distributed_rewards")
|
|
143
|
+
if rewards is None:
|
|
144
|
+
rewards = op_data.get("distributed") # Fallback to old field name
|
|
145
|
+
return rewards if rewards is not None else 0
|
|
124
146
|
|
|
125
147
|
def get_frame_info(self, log_data: dict) -> tuple[int, int]:
|
|
126
148
|
"""
|
|
@@ -133,6 +155,22 @@ class IPFSLogProvider:
|
|
|
133
155
|
return (0, 0)
|
|
134
156
|
return (frame[0], frame[1])
|
|
135
157
|
|
|
158
|
+
def get_operator_validator_count(self, log_data: dict, operator_id: int) -> int:
|
|
159
|
+
"""
|
|
160
|
+
Get the number of validators for an operator in a frame.
|
|
161
|
+
|
|
162
|
+
Returns the count of validators, or 0 if operator not in frame.
|
|
163
|
+
"""
|
|
164
|
+
operators = log_data.get("operators", {})
|
|
165
|
+
op_key = str(operator_id)
|
|
166
|
+
|
|
167
|
+
if op_key not in operators:
|
|
168
|
+
return 0
|
|
169
|
+
|
|
170
|
+
op_data = operators[op_key]
|
|
171
|
+
validators = op_data.get("validators", {})
|
|
172
|
+
return len(validators)
|
|
173
|
+
|
|
136
174
|
async def get_operator_history(
|
|
137
175
|
self,
|
|
138
176
|
operator_id: int,
|
|
@@ -164,6 +202,7 @@ class IPFSLogProvider:
|
|
|
164
202
|
continue
|
|
165
203
|
|
|
166
204
|
start_epoch, end_epoch = self.get_frame_info(log_data)
|
|
205
|
+
validator_count = self.get_operator_validator_count(log_data, operator_id)
|
|
167
206
|
|
|
168
207
|
frames.append(
|
|
169
208
|
FrameData(
|
|
@@ -172,6 +211,7 @@ class IPFSLogProvider:
|
|
|
172
211
|
log_cid=cid,
|
|
173
212
|
block_number=block,
|
|
174
213
|
distributed_rewards=rewards,
|
|
214
|
+
validator_count=validator_count,
|
|
175
215
|
)
|
|
176
216
|
)
|
|
177
217
|
|
src/data/lido_api.py
CHANGED
|
@@ -2,9 +2,11 @@
|
|
|
2
2
|
|
|
3
3
|
import httpx
|
|
4
4
|
|
|
5
|
+
from ..core.config import get_settings
|
|
5
6
|
from .cache import cached
|
|
6
7
|
|
|
7
8
|
LIDO_API_BASE = "https://eth-api.lido.fi/v1"
|
|
9
|
+
LIDO_SUBGRAPH_ID = "Sxx812XgeKyzQPaBpR5YZWmGV5fZuBaPdh7DFhzSwiQ"
|
|
8
10
|
|
|
9
11
|
|
|
10
12
|
class LidoAPIProvider:
|
|
@@ -33,3 +35,106 @@ class LidoAPIProvider:
|
|
|
33
35
|
pass
|
|
34
36
|
|
|
35
37
|
return {"apr": None, "timestamp": None}
|
|
38
|
+
|
|
39
|
+
@cached(ttl=3600) # Cache for 1 hour
|
|
40
|
+
async def get_historical_apr_data(self) -> list[dict]:
|
|
41
|
+
"""Fetch historical APR data from Lido subgraph.
|
|
42
|
+
|
|
43
|
+
Returns list of {block, apr, blockTime} sorted by block ascending.
|
|
44
|
+
Returns empty list if API key not configured or query fails.
|
|
45
|
+
"""
|
|
46
|
+
settings = get_settings()
|
|
47
|
+
if not settings.thegraph_api_key:
|
|
48
|
+
return []
|
|
49
|
+
|
|
50
|
+
# Query in descending order to get most recent 1000 entries
|
|
51
|
+
# (CSM frames are at blocks 21M+, we need recent data)
|
|
52
|
+
query = """
|
|
53
|
+
{
|
|
54
|
+
totalRewards(first: 1000, orderBy: block, orderDirection: desc) {
|
|
55
|
+
apr
|
|
56
|
+
block
|
|
57
|
+
blockTime
|
|
58
|
+
}
|
|
59
|
+
}
|
|
60
|
+
"""
|
|
61
|
+
|
|
62
|
+
endpoint = f"https://gateway-arbitrum.network.thegraph.com/api/{settings.thegraph_api_key}/subgraphs/id/{LIDO_SUBGRAPH_ID}"
|
|
63
|
+
|
|
64
|
+
async with httpx.AsyncClient(timeout=30.0) as client:
|
|
65
|
+
try:
|
|
66
|
+
response = await client.post(
|
|
67
|
+
endpoint,
|
|
68
|
+
json={"query": query},
|
|
69
|
+
headers={"Content-Type": "application/json"},
|
|
70
|
+
)
|
|
71
|
+
if response.status_code == 200:
|
|
72
|
+
data = response.json()
|
|
73
|
+
results = data.get("data", {}).get("totalRewards", [])
|
|
74
|
+
# Reverse to get ascending order (oldest to newest) for binary search
|
|
75
|
+
return list(reversed(results))
|
|
76
|
+
except Exception:
|
|
77
|
+
pass
|
|
78
|
+
|
|
79
|
+
return []
|
|
80
|
+
|
|
81
|
+
def get_apr_for_block(self, apr_data: list[dict], target_block: int) -> float | None:
|
|
82
|
+
"""Find the APR for a specific block number.
|
|
83
|
+
|
|
84
|
+
Returns the APR from the oracle report closest to (but not after) target_block.
|
|
85
|
+
"""
|
|
86
|
+
if not apr_data:
|
|
87
|
+
return None
|
|
88
|
+
|
|
89
|
+
# Find the closest report at or before target_block
|
|
90
|
+
closest = None
|
|
91
|
+
for entry in apr_data:
|
|
92
|
+
block = int(entry["block"])
|
|
93
|
+
if block <= target_block:
|
|
94
|
+
closest = entry
|
|
95
|
+
else:
|
|
96
|
+
break # apr_data is sorted ascending
|
|
97
|
+
|
|
98
|
+
return float(closest["apr"]) if closest else None
|
|
99
|
+
|
|
100
|
+
def get_average_apr_for_range(
|
|
101
|
+
self, apr_data: list[dict], start_timestamp: int, end_timestamp: int
|
|
102
|
+
) -> float | None:
|
|
103
|
+
"""Calculate average APR for a time range.
|
|
104
|
+
|
|
105
|
+
Averages all APR values from oracle reports within the given timestamp range.
|
|
106
|
+
Falls back to the closest APR before the range if no reports fall within.
|
|
107
|
+
|
|
108
|
+
Args:
|
|
109
|
+
apr_data: List of {block, apr, blockTime} sorted by block ascending
|
|
110
|
+
start_timestamp: Unix timestamp for range start
|
|
111
|
+
end_timestamp: Unix timestamp for range end
|
|
112
|
+
|
|
113
|
+
Returns:
|
|
114
|
+
Average APR as a percentage, or None if no data available
|
|
115
|
+
"""
|
|
116
|
+
if not apr_data:
|
|
117
|
+
return None
|
|
118
|
+
|
|
119
|
+
# Find all APR reports within the time range
|
|
120
|
+
reports_in_range = []
|
|
121
|
+
closest_before = None
|
|
122
|
+
|
|
123
|
+
for entry in apr_data:
|
|
124
|
+
block_time = int(entry["blockTime"])
|
|
125
|
+
if block_time < start_timestamp:
|
|
126
|
+
closest_before = entry # Keep track of most recent before range
|
|
127
|
+
elif block_time <= end_timestamp:
|
|
128
|
+
reports_in_range.append(entry)
|
|
129
|
+
else:
|
|
130
|
+
break # Past the range, stop searching
|
|
131
|
+
|
|
132
|
+
if reports_in_range:
|
|
133
|
+
# Average all reports within the range
|
|
134
|
+
total_apr = sum(float(r["apr"]) for r in reports_in_range)
|
|
135
|
+
return total_apr / len(reports_in_range)
|
|
136
|
+
elif closest_before:
|
|
137
|
+
# No reports in range, use the closest one before
|
|
138
|
+
return float(closest_before["apr"])
|
|
139
|
+
|
|
140
|
+
return None
|
src/data/onchain.py
CHANGED
|
@@ -122,6 +122,81 @@ class OnChainDataProvider:
|
|
|
122
122
|
|
|
123
123
|
return None
|
|
124
124
|
|
|
125
|
+
@cached(ttl=300)
|
|
126
|
+
async def get_bond_curve_id(self, operator_id: int) -> int:
|
|
127
|
+
"""Get the bond curve ID for an operator.
|
|
128
|
+
|
|
129
|
+
Returns:
|
|
130
|
+
0 = Permissionless (2 ETH first validator, 1.3 ETH subsequent)
|
|
131
|
+
1 = ICS/Legacy EA (1.5 ETH first validator, 1.3 ETH subsequent)
|
|
132
|
+
"""
|
|
133
|
+
try:
|
|
134
|
+
return self.csaccounting.functions.getBondCurveId(operator_id).call()
|
|
135
|
+
except Exception:
|
|
136
|
+
# Fall back to 0 (Permissionless) if call fails
|
|
137
|
+
return 0
|
|
138
|
+
|
|
139
|
+
@staticmethod
|
|
140
|
+
def calculate_required_bond(validator_count: int, curve_id: int = 0) -> Decimal:
|
|
141
|
+
"""Calculate required bond for a given validator count and curve type.
|
|
142
|
+
|
|
143
|
+
Args:
|
|
144
|
+
validator_count: Number of validators
|
|
145
|
+
curve_id: Bond curve ID from CSAccounting contract
|
|
146
|
+
|
|
147
|
+
Returns:
|
|
148
|
+
Required bond in ETH
|
|
149
|
+
|
|
150
|
+
Note:
|
|
151
|
+
Curve IDs on mainnet CSM:
|
|
152
|
+
- Curve 0: Original permissionless (2 ETH first, 1.3 ETH subsequent) - deprecated
|
|
153
|
+
- Curve 1: Original ICS/EA (1.5 ETH first, 1.3 ETH subsequent) - deprecated
|
|
154
|
+
- Curve 2: Current permissionless default (1.5 ETH first, 1.3 ETH subsequent)
|
|
155
|
+
The contract returns curve points directly, but for estimation we use
|
|
156
|
+
standard formulas.
|
|
157
|
+
"""
|
|
158
|
+
if validator_count <= 0:
|
|
159
|
+
return Decimal(0)
|
|
160
|
+
|
|
161
|
+
# Curve 2 is the current mainnet default (1.5 ETH first, 1.3 ETH subsequent)
|
|
162
|
+
# Curve 0/1 were the original curves, now deprecated
|
|
163
|
+
if curve_id == 0: # Original Permissionless (deprecated)
|
|
164
|
+
first_bond = Decimal("2.0")
|
|
165
|
+
else: # Curve 1, 2, etc - current default curves
|
|
166
|
+
first_bond = Decimal("1.5")
|
|
167
|
+
|
|
168
|
+
subsequent_bond = Decimal("1.3")
|
|
169
|
+
|
|
170
|
+
if validator_count == 1:
|
|
171
|
+
return first_bond
|
|
172
|
+
else:
|
|
173
|
+
return first_bond + (subsequent_bond * (validator_count - 1))
|
|
174
|
+
|
|
175
|
+
@staticmethod
|
|
176
|
+
def get_operator_type_name(curve_id: int) -> str:
|
|
177
|
+
"""Get human-readable operator type from curve ID.
|
|
178
|
+
|
|
179
|
+
Args:
|
|
180
|
+
curve_id: Bond curve ID from CSAccounting contract
|
|
181
|
+
|
|
182
|
+
Returns:
|
|
183
|
+
Operator type name
|
|
184
|
+
|
|
185
|
+
Note:
|
|
186
|
+
Curve IDs on mainnet CSM:
|
|
187
|
+
- Curve 0: Original permissionless (deprecated)
|
|
188
|
+
- Curve 1: Original ICS/EA (deprecated)
|
|
189
|
+
- Curve 2: Current permissionless default
|
|
190
|
+
"""
|
|
191
|
+
if curve_id == 0:
|
|
192
|
+
return "Permissionless (Legacy)"
|
|
193
|
+
elif curve_id == 1:
|
|
194
|
+
return "ICS/Legacy EA"
|
|
195
|
+
elif curve_id == 2:
|
|
196
|
+
return "Permissionless"
|
|
197
|
+
else:
|
|
198
|
+
return f"Custom (Curve {curve_id})"
|
|
199
|
+
|
|
125
200
|
@cached(ttl=60)
|
|
126
201
|
async def get_bond_summary(self, operator_id: int) -> BondSummary:
|
|
127
202
|
"""Get bond summary for an operator."""
|
|
@@ -256,3 +331,119 @@ class OnChainDataProvider:
|
|
|
256
331
|
return []
|
|
257
332
|
|
|
258
333
|
return sorted(all_events, key=lambda x: x["block"])
|
|
334
|
+
|
|
335
|
+
@cached(ttl=3600) # Cache for 1 hour
|
|
336
|
+
async def get_withdrawal_history(
|
|
337
|
+
self, reward_address: str, start_block: int | None = None
|
|
338
|
+
) -> list[dict]:
|
|
339
|
+
"""
|
|
340
|
+
Get withdrawal history for an operator's reward address.
|
|
341
|
+
|
|
342
|
+
Queries stETH Transfer events from CSAccounting to the reward address.
|
|
343
|
+
These represent when the operator claimed their rewards.
|
|
344
|
+
(Note: Claims flow CSFeeDistributor -> CSAccounting -> reward_address)
|
|
345
|
+
|
|
346
|
+
Args:
|
|
347
|
+
reward_address: The operator's reward address
|
|
348
|
+
start_block: Starting block number (default: CSM deployment ~20873000)
|
|
349
|
+
|
|
350
|
+
Returns:
|
|
351
|
+
List of withdrawal events with block, tx_hash, shares, and timestamp
|
|
352
|
+
"""
|
|
353
|
+
if start_block is None:
|
|
354
|
+
start_block = 20873000 # CSM deployment block
|
|
355
|
+
|
|
356
|
+
reward_address = Web3.to_checksum_address(reward_address)
|
|
357
|
+
csaccounting_address = self.settings.csaccounting_address
|
|
358
|
+
|
|
359
|
+
# 1. Try Etherscan API first (most reliable)
|
|
360
|
+
etherscan = EtherscanProvider()
|
|
361
|
+
if etherscan.is_available():
|
|
362
|
+
events = await etherscan.get_transfer_events(
|
|
363
|
+
token_address=self.settings.steth_address,
|
|
364
|
+
from_address=csaccounting_address,
|
|
365
|
+
to_address=reward_address,
|
|
366
|
+
from_block=start_block,
|
|
367
|
+
)
|
|
368
|
+
if events:
|
|
369
|
+
# Enrich with block timestamps
|
|
370
|
+
return await self._enrich_withdrawal_events(events)
|
|
371
|
+
|
|
372
|
+
# 2. Try chunked RPC queries
|
|
373
|
+
events = await self._query_transfer_events_chunked(
|
|
374
|
+
csaccounting_address, reward_address, start_block
|
|
375
|
+
)
|
|
376
|
+
if events:
|
|
377
|
+
return await self._enrich_withdrawal_events(events)
|
|
378
|
+
|
|
379
|
+
return []
|
|
380
|
+
|
|
381
|
+
async def _query_transfer_events_chunked(
|
|
382
|
+
self,
|
|
383
|
+
from_address: str,
|
|
384
|
+
to_address: str,
|
|
385
|
+
start_block: int,
|
|
386
|
+
chunk_size: int = 10000,
|
|
387
|
+
) -> list[dict]:
|
|
388
|
+
"""Query Transfer events in smaller chunks."""
|
|
389
|
+
current_block = self.w3.eth.block_number
|
|
390
|
+
all_events = []
|
|
391
|
+
|
|
392
|
+
from_address = Web3.to_checksum_address(from_address)
|
|
393
|
+
to_address = Web3.to_checksum_address(to_address)
|
|
394
|
+
|
|
395
|
+
for from_blk in range(start_block, current_block, chunk_size):
|
|
396
|
+
to_blk = min(from_blk + chunk_size - 1, current_block)
|
|
397
|
+
try:
|
|
398
|
+
events = self.steth.events.Transfer.get_logs(
|
|
399
|
+
from_block=from_blk,
|
|
400
|
+
to_block=to_blk,
|
|
401
|
+
argument_filters={
|
|
402
|
+
"from": from_address,
|
|
403
|
+
"to": to_address,
|
|
404
|
+
},
|
|
405
|
+
)
|
|
406
|
+
for e in events:
|
|
407
|
+
all_events.append(
|
|
408
|
+
{
|
|
409
|
+
"block": e["blockNumber"],
|
|
410
|
+
"tx_hash": e["transactionHash"].hex(),
|
|
411
|
+
"value": e["args"]["value"],
|
|
412
|
+
}
|
|
413
|
+
)
|
|
414
|
+
except Exception:
|
|
415
|
+
# If chunked queries fail, give up on this method
|
|
416
|
+
return []
|
|
417
|
+
|
|
418
|
+
return sorted(all_events, key=lambda x: x["block"])
|
|
419
|
+
|
|
420
|
+
async def _enrich_withdrawal_events(self, events: list[dict]) -> list[dict]:
|
|
421
|
+
"""Add timestamps and ETH values to withdrawal events."""
|
|
422
|
+
from datetime import datetime, timezone
|
|
423
|
+
|
|
424
|
+
enriched = []
|
|
425
|
+
for event in events:
|
|
426
|
+
try:
|
|
427
|
+
# Get block timestamp
|
|
428
|
+
block = self.w3.eth.get_block(event["block"])
|
|
429
|
+
timestamp = datetime.fromtimestamp(
|
|
430
|
+
block["timestamp"], tz=timezone.utc
|
|
431
|
+
).isoformat()
|
|
432
|
+
|
|
433
|
+
# Convert shares to ETH (using current rate as approximation)
|
|
434
|
+
eth_value = await self.shares_to_eth(event["value"])
|
|
435
|
+
|
|
436
|
+
enriched.append(
|
|
437
|
+
{
|
|
438
|
+
"block_number": event["block"],
|
|
439
|
+
"timestamp": timestamp,
|
|
440
|
+
"shares": event["value"],
|
|
441
|
+
"eth_value": float(eth_value),
|
|
442
|
+
"tx_hash": event["tx_hash"],
|
|
443
|
+
}
|
|
444
|
+
)
|
|
445
|
+
except Exception:
|
|
446
|
+
# Skip events we can't enrich
|
|
447
|
+
continue
|
|
448
|
+
|
|
449
|
+
return enriched
|
src/data/strikes.py
CHANGED
|
@@ -13,6 +13,22 @@ from ..core.config import get_settings
|
|
|
13
13
|
from .cache import cached
|
|
14
14
|
|
|
15
15
|
|
|
16
|
+
# Strike thresholds by operator type (curve_id)
|
|
17
|
+
# Default (Permissionless): 3 strikes till key exit
|
|
18
|
+
# ICS (Identified Community Staker): 4 strikes till key exit
|
|
19
|
+
STRIKE_THRESHOLDS = {
|
|
20
|
+
0: 3, # Permissionless (Legacy)
|
|
21
|
+
1: 4, # ICS/Legacy EA
|
|
22
|
+
2: 3, # Permissionless (current)
|
|
23
|
+
}
|
|
24
|
+
DEFAULT_STRIKE_THRESHOLD = 3
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def get_strike_threshold(curve_id: int) -> int:
|
|
28
|
+
"""Get the strike threshold for ejection based on operator curve_id."""
|
|
29
|
+
return STRIKE_THRESHOLDS.get(curve_id, DEFAULT_STRIKE_THRESHOLD)
|
|
30
|
+
|
|
31
|
+
|
|
16
32
|
@dataclass
|
|
17
33
|
class ValidatorStrikes:
|
|
18
34
|
"""Strike information for a single validator."""
|
|
@@ -20,7 +36,8 @@ class ValidatorStrikes:
|
|
|
20
36
|
pubkey: str
|
|
21
37
|
strikes: list[int] # Array of 6 values (0 or 1) representing strikes per frame
|
|
22
38
|
strike_count: int # Total strikes in the 6-frame window
|
|
23
|
-
|
|
39
|
+
strike_threshold: int # Number of strikes required for ejection (3 or 4)
|
|
40
|
+
at_ejection_risk: bool # True if strike_count >= strike_threshold
|
|
24
41
|
|
|
25
42
|
|
|
26
43
|
class StrikesProvider:
|
|
@@ -141,12 +158,16 @@ class StrikesProvider:
|
|
|
141
158
|
return None
|
|
142
159
|
return await self._fetch_tree_from_ipfs(cid)
|
|
143
160
|
|
|
144
|
-
async def get_operator_strikes(
|
|
161
|
+
async def get_operator_strikes(
|
|
162
|
+
self, operator_id: int, curve_id: int | None = None
|
|
163
|
+
) -> list[ValidatorStrikes]:
|
|
145
164
|
"""
|
|
146
165
|
Get strikes for all validators belonging to an operator.
|
|
147
166
|
|
|
148
167
|
Args:
|
|
149
168
|
operator_id: The CSM operator ID
|
|
169
|
+
curve_id: The operator's bond curve ID (determines strike threshold)
|
|
170
|
+
If None, defaults to 3 strikes (permissionless threshold)
|
|
150
171
|
|
|
151
172
|
Returns:
|
|
152
173
|
List of ValidatorStrikes for validators with any strikes.
|
|
@@ -159,6 +180,9 @@ class StrikesProvider:
|
|
|
159
180
|
values = tree_data.get("values", [])
|
|
160
181
|
operator_strikes = []
|
|
161
182
|
|
|
183
|
+
# Determine strike threshold based on operator type
|
|
184
|
+
strike_threshold = get_strike_threshold(curve_id) if curve_id is not None else DEFAULT_STRIKE_THRESHOLD
|
|
185
|
+
|
|
162
186
|
for entry in values:
|
|
163
187
|
value = entry.get("value", [])
|
|
164
188
|
if len(value) < 3:
|
|
@@ -179,33 +203,42 @@ class StrikesProvider:
|
|
|
179
203
|
pubkey=pubkey,
|
|
180
204
|
strikes=strikes_array if isinstance(strikes_array, list) else [],
|
|
181
205
|
strike_count=strike_count,
|
|
182
|
-
|
|
206
|
+
strike_threshold=strike_threshold,
|
|
207
|
+
at_ejection_risk=strike_count >= strike_threshold,
|
|
183
208
|
)
|
|
184
209
|
)
|
|
185
210
|
|
|
186
211
|
return operator_strikes
|
|
187
212
|
|
|
188
213
|
async def get_operator_strike_summary(
|
|
189
|
-
self, operator_id: int
|
|
214
|
+
self, operator_id: int, curve_id: int | None = None
|
|
190
215
|
) -> dict[str, int]:
|
|
191
216
|
"""
|
|
192
217
|
Get a summary of strikes for an operator.
|
|
193
218
|
|
|
219
|
+
Args:
|
|
220
|
+
operator_id: The CSM operator ID
|
|
221
|
+
curve_id: The operator's bond curve ID (determines strike threshold)
|
|
222
|
+
|
|
194
223
|
Returns:
|
|
195
224
|
Dict with:
|
|
196
225
|
- total_validators_with_strikes: Count of validators with any strikes
|
|
197
|
-
- validators_at_risk: Count of validators
|
|
226
|
+
- validators_at_risk: Count of validators at ejection risk (>= threshold)
|
|
227
|
+
- validators_near_ejection: Count one strike away from ejection
|
|
198
228
|
- total_strikes: Sum of all strikes across all validators
|
|
199
229
|
- max_strikes: Highest strike count on any single validator
|
|
230
|
+
- strike_threshold: The ejection threshold for this operator type
|
|
200
231
|
"""
|
|
201
|
-
strikes = await self.get_operator_strikes(operator_id)
|
|
232
|
+
strikes = await self.get_operator_strikes(operator_id, curve_id)
|
|
233
|
+
strike_threshold = get_strike_threshold(curve_id) if curve_id is not None else DEFAULT_STRIKE_THRESHOLD
|
|
202
234
|
|
|
203
235
|
return {
|
|
204
236
|
"total_validators_with_strikes": len(strikes),
|
|
205
237
|
"validators_at_risk": sum(1 for s in strikes if s.at_ejection_risk),
|
|
206
|
-
"validators_near_ejection": sum(1 for s in strikes if s.strike_count ==
|
|
238
|
+
"validators_near_ejection": sum(1 for s in strikes if s.strike_count == strike_threshold - 1),
|
|
207
239
|
"total_strikes": sum(s.strike_count for s in strikes),
|
|
208
240
|
"max_strikes": max((s.strike_count for s in strikes), default=0),
|
|
241
|
+
"strike_threshold": strike_threshold,
|
|
209
242
|
}
|
|
210
243
|
|
|
211
244
|
def clear_cache(self) -> None:
|