csm-dashboard 0.3.6-py3-none-any.whl → 0.4.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
src/data/ipfs_logs.py CHANGED
@@ -1,6 +1,8 @@
 """IPFS distribution log fetching with persistent caching."""
 
+import asyncio
 import json
+import logging
 import time
 from dataclasses import dataclass
 from datetime import datetime, timezone
@@ -11,6 +13,8 @@ import httpx
 
 from ..core.config import get_settings
 
+logger = logging.getLogger(__name__)
+
 
 # Ethereum Beacon Chain genesis timestamp (Dec 1, 2020 12:00:23 UTC)
 BEACON_GENESIS = 1606824023
@@ -40,20 +44,17 @@ class FrameData:
 class IPFSLogProvider:
     """Fetches and caches historical distribution logs from IPFS."""
 
-    # IPFS gateways to try in order
-    GATEWAYS = [
-        "https://ipfs.io/ipfs/",
-        "https://cloudflare-ipfs.com/ipfs/",
-    ]
-
     # Rate limiting: minimum seconds between gateway requests
     MIN_REQUEST_INTERVAL = 1.0
 
     def __init__(self, cache_dir: Path | None = None):
        self.settings = get_settings()
+        # Use configurable gateways from settings (comma-separated)
+        self.gateways = [g.strip() for g in self.settings.ipfs_gateways.split(",") if g.strip()]
         self.cache_dir = cache_dir or Path.home() / ".cache" / "csm-dashboard" / "ipfs"
         self.cache_dir.mkdir(parents=True, exist_ok=True)
         self._last_request_time = 0.0
+        self._rate_limit_lock = asyncio.Lock()
 
     def _get_cache_path(self, cid: str) -> Path:
         """Get the cache file path for a CID."""
@@ -80,13 +81,14 @@
         except OSError:
             pass  # Cache write failure is non-fatal
 
-    def _rate_limit(self) -> None:
-        """Ensure minimum interval between IPFS gateway requests."""
-        now = time.time()
-        elapsed = now - self._last_request_time
-        if elapsed < self.MIN_REQUEST_INTERVAL:
-            time.sleep(self.MIN_REQUEST_INTERVAL - elapsed)
-        self._last_request_time = time.time()
+    async def _rate_limit(self) -> None:
+        """Ensure minimum interval between IPFS gateway requests (async-safe)."""
+        async with self._rate_limit_lock:
+            now = time.time()
+            elapsed = now - self._last_request_time
+            if elapsed < self.MIN_REQUEST_INTERVAL:
+                await asyncio.sleep(self.MIN_REQUEST_INTERVAL - elapsed)
+            self._last_request_time = time.time()
 
     async def fetch_log(self, cid: str) -> dict | None:
         """
@@ -100,26 +102,32 @@
         if cached is not None:
             return cached
 
-        # Rate limit gateway requests
-        self._rate_limit()
+        # Rate limit gateway requests (async-safe)
+        await self._rate_limit()
 
         # Try each gateway
         async with httpx.AsyncClient(timeout=30.0, follow_redirects=True) as client:
-            for gateway in self.GATEWAYS:
+            for gateway in self.gateways:
                 try:
                     url = f"{gateway}{cid}"
                     response = await client.get(url)
                     if response.status_code == 200:
-                        data = response.json()
+                        try:
+                            data = response.json()
+                        except json.JSONDecodeError as e:
+                            logger.warning(f"Failed to parse IPFS JSON from {gateway}: {e}")
+                            continue
                         # The IPFS log is wrapped in a list, unwrap it
                         if isinstance(data, list) and len(data) == 1:
                             data = data[0]
                         # Cache the successful result
                         self._save_to_cache(cid, data)
                         return data
-                except Exception:
+                except Exception as e:
+                    logger.debug(f"IPFS gateway {gateway} failed for CID {cid}: {e}")
                     continue  # Try next gateway
 
+        logger.warning(f"All IPFS gateways failed for CID {cid}")
         return None
 
     def get_operator_frame_rewards(self, log_data: dict, operator_id: int) -> int | None:
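
Call sites are unchanged apart from the provider's async rate limiting; a minimal usage sketch (the CID is a placeholder, and the import path mirrors this wheel's `src/data` layout):

```python
import asyncio

from src.data.ipfs_logs import IPFSLogProvider  # path per this diff's layout

async def main():
    provider = IPFSLogProvider()  # gateways now come from settings
    log = await provider.fetch_log("bafy...")  # placeholder CID
    if log is None:
        print("all gateways failed or returned invalid JSON")

asyncio.run(main())
```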
@@ -289,9 +297,10 @@
         total_rewards_eth = Decimal(total_rewards_wei) / Decimal(10**18)
 
         # Annualize: (rewards / bond) * (365 / days) * 100
-        apy = float(total_rewards_eth / bond_eth) * (365.0 / total_days) * 100
+        # Keep calculation in Decimal for precision, convert to float only at the end
+        apy = (total_rewards_eth / bond_eth) * Decimal(365) / Decimal(total_days) * Decimal(100)
 
-        results[self._period_name(period)] = round(apy, 2)
+        results[self._period_name(period)] = round(float(apy), 2)
 
         return results
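
Deferring the `float()` conversion keeps the rewards/bond ratio at full `Decimal` precision until the final rounding, instead of collapsing it to 53-bit float precision before annualizing. An illustration of the two orderings (example values only):

```python
from decimal import Decimal

rewards_eth = Decimal("0.123456789012345678")  # 18-decimal stETH amount
bond_eth = Decimal("2.1")
total_days = 28

# New approach: stay in Decimal, convert once at the end.
apy_dec = (rewards_eth / bond_eth) * Decimal(365) / Decimal(total_days) * Decimal(100)
print(round(float(apy_dec), 2))

# Old approach: float(rewards / bond) * (365.0 / days) * 100 rounds the
# intermediate ratio to float before annualizing, losing low-order digits.
```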
297
306
 
src/data/lido_api.py CHANGED
@@ -1,10 +1,14 @@
 """Lido protocol API for stETH APR and other metrics."""
 
+import logging
+
 import httpx
 
 from ..core.config import get_settings
 from .cache import cached
 
+logger = logging.getLogger(__name__)
+
 LIDO_API_BASE = "https://eth-api.lido.fi/v1"
 LIDO_SUBGRAPH_ID = "Sxx812XgeKyzQPaBpR5YZWmGV5fZuBaPdh7DFhzSwiQ"
 
@@ -27,12 +31,14 @@ class LidoAPIProvider:
 
             if response.status_code == 200:
                 data = response.json()
+                # Handle case where data["data"] could be explicitly None
+                data_obj = data.get("data") or {}
                 return {
-                    "apr": float(data.get("data", {}).get("smaApr", 0)),
-                    "timestamp": data.get("data", {}).get("timeUnix"),
+                    "apr": float(data_obj.get("smaApr", 0) or 0),
+                    "timestamp": data_obj.get("timeUnix"),
                 }
-        except Exception:
-            pass
+        except Exception as e:
+            logger.warning(f"Failed to fetch stETH APR from Lido API: {e}")
 
         return {"apr": None, "timestamp": None}
38
44
 
@@ -73,8 +79,8 @@
             results = data.get("data", {}).get("totalRewards", [])
             # Reverse to get ascending order (oldest to newest) for binary search
             return list(reversed(results))
-        except Exception:
-            pass
+        except Exception as e:
+            logger.warning(f"Failed to fetch historical APR from TheGraph: {e}")
 
         return []
 
@@ -89,13 +95,21 @@
         # Find the closest report at or before target_block
         closest = None
         for entry in apr_data:
-            block = int(entry["block"])
+            try:
+                block = int(entry.get("block", 0))
+            except (ValueError, TypeError):
+                continue
             if block <= target_block:
                 closest = entry
             else:
                 break  # apr_data is sorted ascending
 
-        return float(closest["apr"]) if closest else None
+        if closest:
+            try:
+                return float(closest.get("apr", 0))
+            except (ValueError, TypeError):
+                return None
+        return None
 
     def get_average_apr_for_range(
         self, apr_data: list[dict], start_timestamp: int, end_timestamp: int
@@ -121,7 +135,10 @@
         closest_before = None
 
         for entry in apr_data:
-            block_time = int(entry["blockTime"])
+            try:
+                block_time = int(entry.get("blockTime", 0))
+            except (ValueError, TypeError):
+                continue
             if block_time < start_timestamp:
                 closest_before = entry  # Keep track of most recent before range
             elif block_time <= end_timestamp:
@@ -131,10 +148,19 @@
 
         if reports_in_range:
             # Average all reports within the range
-            total_apr = sum(float(r["apr"]) for r in reports_in_range)
-            return total_apr / len(reports_in_range)
+            valid_aprs = []
+            for r in reports_in_range:
+                try:
+                    valid_aprs.append(float(r.get("apr", 0)))
+                except (ValueError, TypeError):
+                    continue
+            if valid_aprs:
+                return sum(valid_aprs) / len(valid_aprs)
         elif closest_before:
             # No reports in range, use the closest one before
-            return float(closest_before["apr"])
+            try:
+                return float(closest_before.get("apr", 0))
+            except (ValueError, TypeError):
+                pass
 
         return None
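
Note the comments mention binary search, but both lookups scan the report list linearly, which is fine at current sizes. If the list grows, a `bisect`-based variant over the ascending block numbers would be a drop-in upgrade; a sketch, not part of the package:

```python
from bisect import bisect_right

def apr_at_block(apr_data: list[dict], target_block: int) -> float | None:
    """apr_data sorted ascending by 'block'; return APR of last report <= target."""
    blocks = [int(e["block"]) for e in apr_data]
    idx = bisect_right(blocks, target_block)
    if idx == 0:
        return None  # no report at or before target_block
    return float(apr_data[idx - 1]["apr"])

print(apr_at_block([{"block": 10, "apr": "3.1"}, {"block": 20, "apr": "3.4"}], 15))  # 3.1
```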
src/data/onchain.py CHANGED
@@ -1,10 +1,14 @@
 """On-chain data fetching via Web3."""
 
 import asyncio
+import logging
 from decimal import Decimal
+from functools import partial
 
 from web3 import Web3
 
+logger = logging.getLogger(__name__)
+
 from ..core.config import get_settings
 from ..core.contracts import (
     CSACCOUNTING_ABI,
@@ -24,7 +28,12 @@ class OnChainDataProvider:
 
     def __init__(self, rpc_url: str | None = None):
         self.settings = get_settings()
-        self.w3 = Web3(Web3.HTTPProvider(rpc_url or self.settings.eth_rpc_url))
+        self.w3 = Web3(
+            Web3.HTTPProvider(
+                rpc_url or self.settings.eth_rpc_url,
+                request_kwargs={"timeout": 30},
+            )
+        )
 
         # Initialize contracts
         self.csmodule = self.w3.eth.contract(
@@ -51,12 +60,16 @@
     @cached(ttl=60)
     async def get_node_operators_count(self) -> int:
         """Get total number of node operators."""
-        return self.csmodule.functions.getNodeOperatorsCount().call()
+        return await asyncio.to_thread(
+            self.csmodule.functions.getNodeOperatorsCount().call
+        )
 
     @cached(ttl=300)
     async def get_node_operator(self, operator_id: int) -> NodeOperator:
         """Get node operator data by ID."""
-        data = self.csmodule.functions.getNodeOperator(operator_id).call()
+        data = await asyncio.to_thread(
+            self.csmodule.functions.getNodeOperator(operator_id).call
+        )
         return NodeOperator(
             node_operator_id=operator_id,
             total_added_keys=data[0],
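
web3.py's `HTTPProvider` is synchronous, so each `.call()` previously blocked the event loop for a full RPC round trip inside these `async` methods. The diff hands the un-called bound method (`....call`, no parentheses) to `asyncio.to_thread`, so building the `ContractFunction` stays cheap on the loop while the network I/O runs in a worker thread. The pattern in isolation:

```python
import asyncio
import time

def blocking_rpc_call() -> int:
    """Stand-in for contract.functions.something().call()."""
    time.sleep(0.2)  # simulated network round trip
    return 42

async def main():
    # The event loop stays responsive while the sync call runs in a thread.
    result = await asyncio.to_thread(blocking_rpc_call)
    print(result)

asyncio.run(main())
```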
@@ -95,10 +108,13 @@
 
         if batch_supported:
             try:
-                with self.w3.batch_requests() as batch:
-                    for op_id in range(start, end):
-                        batch.add(self.csmodule.functions.getNodeOperator(op_id))
-                    results = batch.execute()
+                def run_batch():
+                    with self.w3.batch_requests() as batch:
+                        for op_id in range(start, end):
+                            batch.add(self.csmodule.functions.getNodeOperator(op_id))
+                        return batch.execute()
+
+                results = await asyncio.to_thread(run_batch)
 
                 for i, data in enumerate(results):
                     op_id = start + i
@@ -114,7 +130,9 @@
         # Sequential fallback with rate limiting
         for op_id in range(start, end):
             try:
-                data = self.csmodule.functions.getNodeOperator(op_id).call()
+                data = await asyncio.to_thread(
+                    self.csmodule.functions.getNodeOperator(op_id).call
+                )
                 manager = data[10]
                 reward = data[12]
                 if manager.lower() == address.lower() or reward.lower() == address.lower():
@@ -136,7 +154,9 @@
         1 = ICS/Legacy EA (1.5 ETH first validator, 1.3 ETH subsequent)
         """
         try:
-            return self.csaccounting.functions.getBondCurveId(operator_id).call()
+            return await asyncio.to_thread(
+                self.csaccounting.functions.getBondCurveId(operator_id).call
+            )
         except Exception:
             # Fall back to 0 (Permissionless) if call fails
             return 0
@@ -166,8 +186,12 @@
         # Curve 2 is the current mainnet default (1.5 ETH first, 1.3 ETH subsequent)
         # Curve 0/1 were the original curves, now deprecated
         if curve_id == 0:  # Original Permissionless (deprecated)
+            logger.debug(f"Using deprecated curve 0 bond calculation")
             first_bond = Decimal("2.0")
-        else:  # Curve 1, 2, etc - current default curves
+        elif curve_id not in (1, 2):
+            logger.warning(f"Unknown curve_id {curve_id}, using default bond calculation")
+            first_bond = Decimal("1.5")
+        else:  # Curve 1, 2 - current default curves
             first_bond = Decimal("1.5")
 
         subsequent_bond = Decimal("1.3")
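
From the constants and comments in this hunk, the required bond for `n` validators appears to be `first_bond + (n - 1) * subsequent_bond`. A worked check under that assumption (the helper below is illustrative, not the package's code):

```python
from decimal import Decimal

def required_bond(n_validators: int, curve_id: int) -> Decimal:
    # Constants as they appear in the diff: curve 0 -> 2.0 ETH first key,
    # all other curves -> 1.5 ETH first key, 1.3 ETH per subsequent key.
    first = Decimal("2.0") if curve_id == 0 else Decimal("1.5")
    subsequent = Decimal("1.3")
    if n_validators <= 0:
        return Decimal(0)
    return first + (n_validators - 1) * subsequent

print(required_bond(3, curve_id=2))  # 1.5 + 2 * 1.3 = 4.1 ETH
```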
@@ -205,9 +229,9 @@
     @cached(ttl=60)
     async def get_bond_summary(self, operator_id: int) -> BondSummary:
         """Get bond summary for an operator."""
-        current, required = self.csaccounting.functions.getBondSummary(
-            operator_id
-        ).call()
+        current, required = await asyncio.to_thread(
+            self.csaccounting.functions.getBondSummary(operator_id).call
+        )
 
         current_eth = Decimal(current) / Decimal(10**18)
         required_eth = Decimal(required) / Decimal(10**18)
@@ -224,14 +248,18 @@
     @cached(ttl=60)
     async def get_distributed_shares(self, operator_id: int) -> int:
         """Get already distributed (claimed) shares for operator."""
-        return self.csfeedistributor.functions.distributedShares(operator_id).call()
+        return await asyncio.to_thread(
+            self.csfeedistributor.functions.distributedShares(operator_id).call
+        )
 
     @cached(ttl=60)
     async def shares_to_eth(self, shares: int) -> Decimal:
         """Convert stETH shares to ETH value."""
         if shares == 0:
             return Decimal(0)
-        eth_wei = self.steth.functions.getPooledEthByShares(shares).call()
+        eth_wei = await asyncio.to_thread(
+            self.steth.functions.getPooledEthByShares(shares).call
+        )
         return Decimal(eth_wei) / Decimal(10**18)
 
     async def get_signing_keys(
@@ -246,9 +274,11 @@
 
         for batch_start in range(start, start + count, batch_size):
             batch_count = min(batch_size, start + count - batch_start)
-            keys_bytes = self.csmodule.functions.getSigningKeys(
-                operator_id, batch_start, batch_count
-            ).call()
+            keys_bytes = await asyncio.to_thread(
+                self.csmodule.functions.getSigningKeys(
+                    operator_id, batch_start, batch_count
+                ).call
+            )
             # Each key is 48 bytes
             for i in range(0, len(keys_bytes), 48):
                 key = "0x" + keys_bytes[i : i + 48].hex()
@@ -256,9 +286,11 @@
 
         return keys
 
-    def get_current_log_cid(self) -> str:
+    async def get_current_log_cid(self) -> str:
         """Get the current distribution log CID from the contract."""
-        return self.csfeedistributor.functions.logCid().call()
+        return await asyncio.to_thread(
+            self.csfeedistributor.functions.logCid().call
+        )
 
     @cached(ttl=3600)  # Cache for 1 hour since historical events don't change
     async def get_distribution_log_history(
@@ -304,12 +336,14 @@
 
         # 4. Ultimate fallback: current logCid only
         try:
-            current_cid = self.get_current_log_cid()
+            current_cid = await self.get_current_log_cid()
             if current_cid:
-                current_block = self.w3.eth.block_number
+                current_block = await asyncio.to_thread(
+                    lambda: self.w3.eth.block_number
+                )
                 return [{"block": current_block, "logCid": current_cid}]
-        except Exception:
-            pass
+        except Exception as e:
+            logger.debug(f"Failed to get current log CID as fallback: {e}")
 
         return []
 
@@ -317,15 +351,18 @@
         self, start_block: int, chunk_size: int = 10000
     ) -> list[dict]:
         """Query events in smaller chunks to work around RPC limitations."""
-        current_block = self.w3.eth.block_number
+        current_block = await asyncio.to_thread(lambda: self.w3.eth.block_number)
         all_events = []
 
         for from_block in range(start_block, current_block, chunk_size):
             to_block = min(from_block + chunk_size - 1, current_block)
             try:
-                events = self.csfeedistributor.events.DistributionLogUpdated.get_logs(
-                    from_block=from_block,
-                    to_block=to_block,
+                events = await asyncio.to_thread(
+                    partial(
+                        self.csfeedistributor.events.DistributionLogUpdated.get_logs,
+                        from_block=from_block,
+                        to_block=to_block,
+                    )
                 )
                 for e in events:
                     all_events.append(
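
`functools.partial` bundles the keyword arguments into a single no-arg callable for the worker thread. Since Python 3.9, `asyncio.to_thread(func, *args, **kwargs)` also forwards keyword arguments to `func` itself, so `partial` here is a stylistic choice that keeps these call sites uniform:

```python
import asyncio
from functools import partial

def get_logs(*, from_block: int, to_block: int) -> list:
    """Stand-in for a web3 event's keyword-only get_logs."""
    return [f"logs {from_block}-{to_block}"]

async def main():
    fetch = partial(get_logs, from_block=100, to_block=199)
    print(await asyncio.to_thread(fetch))
    # Equivalent without partial:
    print(await asyncio.to_thread(get_logs, from_block=100, to_block=199))

asyncio.run(main())
```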
@@ -443,7 +480,7 @@
         chunk_size: int = 10000,
     ) -> list[dict]:
         """Query WithdrawalRequested events in chunks via RPC."""
-        current_block = self.w3.eth.block_number
+        current_block = await asyncio.to_thread(lambda: self.w3.eth.block_number)
         all_events = []
 
         requestor = Web3.to_checksum_address(requestor)
@@ -452,13 +489,16 @@
         for from_blk in range(start_block, current_block, chunk_size):
             to_blk = min(from_blk + chunk_size - 1, current_block)
             try:
-                events = self.withdrawal_queue.events.WithdrawalRequested.get_logs(
-                    from_block=from_blk,
-                    to_block=to_blk,
-                    argument_filters={
-                        "requestor": requestor,
-                        "owner": owner,
-                    },
+                events = await asyncio.to_thread(
+                    partial(
+                        self.withdrawal_queue.events.WithdrawalRequested.get_logs,
+                        from_block=from_blk,
+                        to_block=to_blk,
+                        argument_filters={
+                            "requestor": requestor,
+                            "owner": owner,
+                        },
+                    )
                 )
                 for e in events:
                     all_events.append(
@@ -488,9 +528,9 @@
         # Get status for all request IDs
         request_ids = [e["request_id"] for e in events]
         try:
-            statuses = self.withdrawal_queue.functions.getWithdrawalStatus(
-                request_ids
-            ).call()
+            statuses = await asyncio.to_thread(
+                self.withdrawal_queue.functions.getWithdrawalStatus(request_ids).call
+            )
         except Exception:
             # If status query fails, set all as unknown
             statuses = [None] * len(events)
@@ -503,7 +543,9 @@
         for i, event in enumerate(events):
             try:
                 # Get block timestamp
-                block = self.w3.eth.get_block(event["block"])
+                block = await asyncio.to_thread(
+                    partial(self.w3.eth.get_block, event["block"])
+                )
                 timestamp = datetime.fromtimestamp(
                     block["timestamp"], tz=timezone.utc
                 ).isoformat()
@@ -542,15 +584,18 @@
                     enriched_event["claim_tx_hash"] = claim["tx_hash"]
                     # Get claim timestamp
                     try:
-                        claim_block = self.w3.eth.get_block(claim["block"])
+                        claim_block = await asyncio.to_thread(
+                            partial(self.w3.eth.get_block, claim["block"])
+                        )
                         enriched_event["claim_timestamp"] = datetime.fromtimestamp(
                             claim_block["timestamp"], tz=timezone.utc
                         ).isoformat()
-                    except Exception:
-                        pass
+                    except Exception as e:
+                        logger.debug(f"Failed to get claim timestamp for block {claim.get('block')}: {e}")
 
                 enriched.append(enriched_event)
-            except Exception:
+            except Exception as e:
+                logger.debug(f"Failed to enrich withdrawal event: {e}")
                 continue
 
         return enriched
@@ -573,16 +618,19 @@
             return events
 
         # RPC fallback - query in chunks
-        current_block = self.w3.eth.block_number
+        current_block = await asyncio.to_thread(lambda: self.w3.eth.block_number)
         all_events = []
 
         for from_blk in range(start_block, current_block, 10000):
             to_blk = min(from_blk + 9999, current_block)
             try:
-                logs = self.withdrawal_queue.events.WithdrawalClaimed.get_logs(
-                    from_block=from_blk,
-                    to_block=to_blk,
-                    argument_filters={"receiver": receiver},
+                logs = await asyncio.to_thread(
+                    partial(
+                        self.withdrawal_queue.events.WithdrawalClaimed.get_logs,
+                        from_block=from_blk,
+                        to_block=to_blk,
+                        argument_filters={"receiver": receiver},
+                    )
                 )
                 for e in logs:
                     all_events.append(
@@ -606,7 +654,7 @@
         chunk_size: int = 10000,
     ) -> list[dict]:
         """Query Transfer events in smaller chunks."""
-        current_block = self.w3.eth.block_number
+        current_block = await asyncio.to_thread(lambda: self.w3.eth.block_number)
         all_events = []
 
         from_address = Web3.to_checksum_address(from_address)
@@ -615,13 +663,16 @@
         for from_blk in range(start_block, current_block, chunk_size):
             to_blk = min(from_blk + chunk_size - 1, current_block)
             try:
-                events = self.steth.events.Transfer.get_logs(
-                    from_block=from_blk,
-                    to_block=to_blk,
-                    argument_filters={
-                        "from": from_address,
-                        "to": to_address,
-                    },
+                events = await asyncio.to_thread(
+                    partial(
+                        self.steth.events.Transfer.get_logs,
+                        from_block=from_blk,
+                        to_block=to_blk,
+                        argument_filters={
+                            "from": from_address,
+                            "to": to_address,
+                        },
+                    )
                 )
                 for e in events:
                     all_events.append(
@@ -645,7 +696,9 @@
         for event in events:
             try:
                 # Get block timestamp
-                block = self.w3.eth.get_block(event["block"])
+                block = await asyncio.to_thread(
+                    partial(self.w3.eth.get_block, event["block"])
+                )
                 timestamp = datetime.fromtimestamp(
                     block["timestamp"], tz=timezone.utc
                 ).isoformat()
src/data/price.py ADDED
@@ -0,0 +1,46 @@
+"""Price fetching from CoinGecko API."""
+
+import logging
+import time
+
+import httpx
+
+logger = logging.getLogger(__name__)
+
+# Cache ETH price for 5 minutes
+_price_cache: dict = {"eth_usd": None, "timestamp": 0}
+CACHE_TTL = 300  # 5 minutes
+
+
+async def get_eth_price() -> float | None:
+    """Fetch current ETH price in USD from CoinGecko.
+
+    Returns:
+        ETH price in USD, or None if fetch fails
+    """
+    global _price_cache
+
+    # Check cache
+    now = time.time()
+    if _price_cache["eth_usd"] is not None and (now - _price_cache["timestamp"]) < CACHE_TTL:
+        return _price_cache["eth_usd"]
+
+    try:
+        async with httpx.AsyncClient(timeout=10.0) as client:
+            response = await client.get(
+                "https://api.coingecko.com/api/v3/simple/price",
+                params={"ids": "ethereum", "vs_currencies": "usd"},
+            )
+            if response.status_code == 200:
+                data = response.json()
+                price = data.get("ethereum", {}).get("usd")
+                if price:
+                    _price_cache["eth_usd"] = float(price)
+                    _price_cache["timestamp"] = now
+                    logger.info(f"Fetched ETH price: ${price}")
+                    return float(price)
+    except Exception as e:
+        logger.warning(f"Failed to fetch ETH price: {e}")
+
+    # Return cached value if available (even if stale)
+    return _price_cache["eth_usd"]
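
A usage sketch for the new module; the import path mirrors the wheel's `src/data` layout and may differ in the installed package. All callers in a process share the module-level cache, and a failed refresh falls back to the last cached value even past its TTL:

```python
import asyncio

from src.data.price import get_eth_price  # path per this diff's layout

async def main():
    price = await get_eth_price()
    if price is not None:
        print(f"ETH = ${price:,.2f}")
    else:
        print("price unavailable: CoinGecko unreachable and nothing cached")

asyncio.run(main())
```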
src/data/rewards_tree.py CHANGED
@@ -1,11 +1,16 @@
 """Fetch and parse the rewards merkle tree from GitHub."""
 
+import json
+import logging
+
 import httpx
 
 from ..core.config import get_settings
 from ..core.types import RewardsInfo
 from .cache import cached
 
+logger = logging.getLogger(__name__)
+
 
 class RewardsTreeProvider:
     """Fetches rewards data from the csm-rewards repository."""
@@ -26,9 +31,19 @@ class RewardsTreeProvider:
         }
         """
         async with httpx.AsyncClient(timeout=30.0) as client:
-            response = await client.get(self.settings.rewards_proofs_url)
-            response.raise_for_status()
-            return response.json()
+            try:
+                response = await client.get(self.settings.rewards_proofs_url)
+                response.raise_for_status()
+                return response.json()
+            except httpx.HTTPStatusError as e:
+                logger.warning(f"Failed to fetch rewards tree: HTTP {e.response.status_code}")
+                return {}
+            except httpx.RequestError as e:
+                logger.warning(f"Failed to fetch rewards tree: {e}")
+                return {}
+            except json.JSONDecodeError as e:
+                logger.warning(f"Failed to parse rewards tree JSON: {e}")
+                return {}
 
     async def get_operator_rewards(self, operator_id: int) -> RewardsInfo | None:
         """Get rewards info for a specific operator."""