dexscreen-0.0.1-py3-none-any.whl → dexscreen-0.0.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -4,10 +4,14 @@ Unified streaming interface
 
 import asyncio
 import logging
+import time
 from abc import ABC, abstractmethod
 from typing import Callable, Optional
 
+from ..core.exceptions import HttpError
 from ..core.models import TokenPair
+from ..utils.logging_config import get_contextual_logger
+from ..utils.retry import RetryConfig, RetryManager, RetryPresets
 
 logger = logging.getLogger(__name__)
 
@@ -20,6 +24,18 @@ class StreamingClient(ABC):
         self.running = False
         self.callback_errors: dict[str, int] = {}  # Track errors per subscription
 
+        # Enhanced logging
+        self.contextual_logger = get_contextual_logger(__name__)
+
+        # Streaming statistics
+        self.stats = {
+            "total_subscriptions": 0,
+            "active_subscriptions": 0,
+            "total_emissions": 0,
+            "total_callback_errors": 0,
+            "last_emission_time": None,
+        }
+
     @abstractmethod
     async def connect(self):
         """Establish connection"""
@@ -89,18 +105,58 @@ class StreamingClient(ABC):
             return self.callback_errors.get(key, 0)
         return sum(self.callback_errors.values())
 
+    def get_streaming_stats(self) -> dict:
+        """Get comprehensive streaming statistics"""
+        combined_stats = self.stats.copy()
+        if hasattr(self, "polling_stats"):
+            combined_stats.update(self.polling_stats)  # type: ignore[attr-defined]
+        combined_stats.update(
+            {
+                "total_callback_errors": sum(self.callback_errors.values()),
+                "subscriptions_with_errors": len([k for k, v in self.callback_errors.items() if v > 0]),
+                "running": self.running,
+            }
+        )
+        return combined_stats
+
 
 class PollingStream(StreamingClient):
     """Polling implementation with streaming interface"""
 
-    def __init__(self, dexscreener_client, interval: float = 1.0, filter_changes: bool = True):
+    def __init__(
+        self,
+        dexscreener_client,
+        interval: float = 1.0,
+        filter_changes: bool = True,
+        retry_config: Optional[RetryConfig] = None,
+    ):
         super().__init__()
         self.dexscreener_client = dexscreener_client  # The main DexscreenerClient instance
         self.interval = interval  # Default interval
         self.filter_changes = filter_changes  # Whether to filter for changes
+        self.retry_config = retry_config or RetryPresets.network_operations()  # Conservative retry for polling
         self.tasks: dict[str, asyncio.Task] = {}
         self._cache: dict[str, Optional[TokenPair]] = {}
 
+        # Enhanced polling statistics
+        self.polling_stats = {
+            "total_polls": 0,
+            "successful_polls": 0,
+            "failed_polls": 0,
+            "cache_hits": 0,
+            "cache_misses": 0,
+            "average_poll_duration": 0.0,
+            "last_poll_time": None,
+        }
+
+        init_context = {
+            "interval": interval,
+            "filter_changes": filter_changes,
+            "polling_mode": "http",
+        }
+
+        self.contextual_logger.debug("PollingStream initialized", context=init_context)
+
         # Data structures for chain-based polling (max 30 per chain)
         self._chain_subscriptions: dict[str, set[str]] = {}  # chain -> set of addresses
         self._chain_tasks: dict[str, asyncio.Task] = {}  # chain -> polling task
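
For orientation, a usage sketch of the new constructor parameter and statistics surface (not from the package itself; the import paths, and any RetryConfig fields beyond max_retries, which is the only one this diff reads, are assumptions):

    import asyncio

    from dexscreen import DexscreenerClient            # assumed top-level export
    from dexscreen.stream import PollingStream         # assumed module path (not shown in the diff)
    from dexscreen.utils.retry import RetryConfig

    async def main() -> None:
        client = DexscreenerClient()
        stream = PollingStream(
            client,
            interval=1.0,                              # poll once per second
            filter_changes=True,                       # emit only on changed pair data
            retry_config=RetryConfig(max_retries=3),   # max_retries is the only field the diff reads
        )
        await stream.connect()
        await asyncio.sleep(10)                        # let a few polls run
        print(stream.get_streaming_stats())            # self.stats merged with polling_stats
        await stream.close()

    asyncio.run(main())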
@@ -114,8 +170,18 @@ class PollingStream(StreamingClient):
 
     async def connect(self):
         """Start streaming service"""
+        connect_context = {
+            "operation": "connect",
+            "previous_state": "running" if self.running else "stopped",
+        }
+
+        self.contextual_logger.info("Starting polling stream service", context=connect_context)
+
         self.running = True
 
+        connect_context.update({"current_state": "running"})
+        self.contextual_logger.info("Polling stream service started", context=connect_context)
+
     async def disconnect(self):
         """Stop all polling tasks"""
         self.running = False
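
The context= keyword in these calls is not standard logging; it comes from the new logging_config module. A minimal sketch of one way such a wrapper could forward structured context via LogRecord extras (an assumption, not the actual implementation):

    import logging
    from typing import Optional

    class ContextualLogger:
        """Thin wrapper that forwards a structured `context` dict via `extra`."""

        def __init__(self, name: str):
            self._logger = logging.getLogger(name)

        def _log(self, level: int, msg: str, *args, context: Optional[dict] = None, **kwargs):
            if context is not None:
                # Stash the context on the LogRecord so a formatter can render it
                kwargs.setdefault("extra", {})["context"] = context
            self._logger.log(level, msg, *args, **kwargs)

        def debug(self, msg: str, *args, **kwargs):
            self._log(logging.DEBUG, msg, *args, **kwargs)

        def info(self, msg: str, *args, **kwargs):
            self._log(logging.INFO, msg, *args, **kwargs)

        def warning(self, msg: str, *args, **kwargs):
            self._log(logging.WARNING, msg, *args, **kwargs)

        def error(self, msg: str, *args, **kwargs):
            self._log(logging.ERROR, msg, *args, **kwargs)

    def get_contextual_logger(name: str) -> ContextualLogger:
        return ContextualLogger(name)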
@@ -227,7 +293,6 @@ class PollingStream(StreamingClient):
 
     async def _poll_chain(self, chain_id: str):
         """Poll all pairs for a specific chain (max 30 per chain)"""
-        import time
 
         next_poll_time = time.time()
 
@@ -253,7 +318,6 @@ class PollingStream(StreamingClient):
 
     async def _batch_fetch_and_emit(self, chain_id: str):
         """Fetch multiple pairs for a chain and emit updates"""
-        import time
 
         if chain_id not in self._chain_subscriptions:
             return
@@ -273,51 +337,151 @@ class PollingStream(StreamingClient):
             )
         addresses = addresses[:max_subscriptions]
 
-        try:
-            # Log API request time
-            request_start = time.time()
-
-            # Fetch all pairs in one request (max 30 due to limit above)
-            pairs = await self.dexscreener_client.get_pairs_by_pairs_addresses_async(chain_id, addresses)
-
-            request_end = time.time()
-            request_duration = request_end - request_start
-
-            logger.debug(
-                "Batch fetch completed for chain %s: %d addresses, %d pairs returned in %.2fms",
-                chain_id,
-                len(addresses),
-                len(pairs),
-                request_duration * 1000,
-            )
-
-            # Create a mapping for quick lookup
-            pairs_map = {pair.pair_address.lower(): pair for pair in pairs}
-
-            # Process each address
-            for address in addresses:
-                key = f"{chain_id}:{address}"
-                pair = pairs_map.get(address.lower())
-
-                if pair:
-                    # Add request timing info to the pair object for debugging
-                    pair._request_duration = request_duration
-                    pair._request_time = request_end
-
-                    # Check if we should filter for changes
-                    if self.filter_changes:
-                        # Only emit if data changed
-                        if self._has_changed(key, pair):
-                            self._cache[key] = pair
+        retry_manager = RetryManager(self.retry_config)
+
+        while True:
+            try:
+                # Log API request time
+                request_start = time.time()
+
+                # Fetch all pairs in one request (max 30 due to limit above)
+                pairs = await self.dexscreener_client.get_pairs_by_pairs_addresses_async(chain_id, addresses)
+
+                request_end = time.time()
+                request_duration = request_end - request_start
+
+                # Update polling statistics
+                self.polling_stats["total_polls"] += 1
+                self.polling_stats["successful_polls"] += 1
+                self.polling_stats["last_poll_time"] = request_end
+
+                # Update average poll duration
+                total_polls = self.polling_stats["total_polls"]
+                current_avg = self.polling_stats["average_poll_duration"]
+                self.polling_stats["average_poll_duration"] = (
+                    current_avg * (total_polls - 1) + request_duration
+                ) / total_polls
+
+                logger.debug(
+                    "Batch fetch completed for chain %s: %d addresses, %d pairs returned in %.2fms",
+                    chain_id,
+                    len(addresses),
+                    len(pairs),
+                    request_duration * 1000,
+                )
+
+                # Create a mapping for quick lookup
+                pairs_map = {pair.pair_address.lower(): pair for pair in pairs}
+
+                # Process each address
+                for address in addresses:
+                    key = f"{chain_id}:{address}"
+                    pair = pairs_map.get(address.lower())
+
+                    if pair:
+                        # Add request timing info to the pair object for debugging
+                        pair._request_duration = request_duration
+                        pair._request_time = request_end
+
+                        # Check if we should filter for changes
+                        if self.filter_changes:
+                            # Only emit if data changed
+                            if self._has_changed(key, pair):
+                                self._cache[key] = pair
+                                await self._emit(chain_id, address, pair)
+                                self.polling_stats["cache_misses"] += 1
+                            else:
+                                self.polling_stats["cache_hits"] += 1
+                        else:
+                            # Raw mode: emit every update
                             await self._emit(chain_id, address, pair)
-                    else:
-                        # Raw mode: emit every update
-                        await self._emit(chain_id, address, pair)
 
-        except Exception:
-            logger.exception(
-                "Polling error for chain %s with %d addresses", chain_id, len(addresses) if addresses else 0
-            )
+                # Success - break out of retry loop
+                break
+
+            except Exception as e:
+                retry_manager.record_failure(e)
+
+                # Update error statistics
+                self.polling_stats["total_polls"] += 1
+                self.polling_stats["failed_polls"] += 1
+
+                if retry_manager.should_retry(e):
+                    retry_context = {
+                        "operation": "batch_fetch_retry",
+                        "chain_id": chain_id,
+                        "addresses_count": len(addresses),
+                        "attempt": retry_manager.attempt,
+                        "max_retries": self.retry_config.max_retries + 1,
+                        "error_type": type(e).__name__,
+                        "error_message": str(e),
+                        "retry_delay": retry_manager.calculate_delay(),
+                    }
+
+                    self.contextual_logger.warning(
+                        "Polling error for chain %s, retrying (attempt %d/%d): %s",
+                        chain_id,
+                        retry_manager.attempt,
+                        self.retry_config.max_retries + 1,
+                        str(e),
+                        context=retry_context,
+                    )
+
+                    logger.warning(
+                        "Polling error for chain %s with %d addresses (attempt %d/%d): %s. Retrying in %.2fs",
+                        chain_id,
+                        len(addresses),
+                        retry_manager.attempt,
+                        self.retry_config.max_retries + 1,
+                        str(e),
+                        retry_manager.calculate_delay(),
+                    )
+                    await retry_manager.wait_async()
+                    continue
+                else:
+                    # Max retries exceeded - log and continue to next poll cycle
+                    final_error_context = {
+                        "operation": "batch_fetch_final_failure",
+                        "chain_id": chain_id,
+                        "addresses_count": len(addresses),
+                        "total_attempts": retry_manager.attempt,
+                        "error_type": type(e).__name__,
+                        "error_message": str(e),
+                        "will_retry_next_poll": True,
+                    }
+
+                    if isinstance(e, HttpError):
+                        self.contextual_logger.warning(
+                            "HTTP error during batch fetch after %d attempts, will retry on next poll: %s",
+                            retry_manager.attempt,
+                            str(e),
+                            context=final_error_context,
+                        )
+
+                        logger.warning(
+                            "HTTP error during batch fetch for chain %s with %d addresses after %d attempts: %s. Will retry on next poll.",
+                            chain_id,
+                            len(addresses),
+                            retry_manager.attempt,
+                            e,
+                        )
+                    else:
+                        self.contextual_logger.error(
+                            "Polling failed after %d attempts, will retry on next poll: %s",
+                            retry_manager.attempt,
+                            str(e),
+                            context=final_error_context,
+                            exc_info=True,
+                        )
+
+                        logger.exception(
+                            "Polling failed for chain %s with %d addresses after %d attempts: %s. Will retry on next poll.",
+                            chain_id,
+                            len(addresses),
+                            retry_manager.attempt,
+                            type(e).__name__,
+                        )
+                    break
 
     def _has_changed(self, key: str, new_pair: TokenPair) -> bool:
         """Check if pair data has changed"""
@@ -338,9 +502,17 @@ class PollingStream(StreamingClient):
         return key in self.subscriptions
 
     async def close(self):
-        """Alias for disconnect"""
+        """Alias for disconnect with stats logging"""
+        close_context = {
+            "operation": "close_stream",
+            "final_stats": self.get_streaming_stats(),
+        }
+
+        self.contextual_logger.info("Closing polling stream", context=close_context)
         await self.disconnect()
 
+        self.contextual_logger.info("Polling stream closed", context=close_context)
+
     # Token subscription methods
     async def subscribe_token(
         self,
@@ -392,7 +564,6 @@ class PollingStream(StreamingClient):
 
     async def _poll_token(self, chain_id: str, token_address: str):
         """Poll all pairs for a specific token"""
-        import time
 
         key = f"{chain_id}:{token_address}"
         next_poll_time = time.time()
@@ -419,44 +590,83 @@ class PollingStream(StreamingClient):
 
     async def _fetch_and_emit_token(self, chain_id: str, token_address: str):
         """Fetch all pairs for a token and emit updates"""
-        import time
 
         key = f"{chain_id}:{token_address}"
         if key not in self._token_subscriptions:
             return
 
-        try:
-            # Log API request time
-            request_start = time.time()
+        retry_manager = RetryManager(self.retry_config)
 
-            # Fetch all pairs for this token
-            pairs = await self.dexscreener_client.get_pairs_by_token_address_async(chain_id, token_address)
+        while True:
+            try:
+                # Log API request time
+                request_start = time.time()
 
-            request_end = time.time()
-            request_duration = request_end - request_start
+                # Fetch all pairs for this token
+                pairs = await self.dexscreener_client.get_pairs_by_token_address_async(chain_id, token_address)
 
-            logger.debug(
-                "Token fetch completed for %s:%s - %d pairs returned in %.2fms",
-                chain_id,
-                token_address,
-                len(pairs),
-                request_duration * 1000,
-            )
+                request_end = time.time()
+                request_duration = request_end - request_start
 
-            # Add timing info for debugging
-            for pair in pairs:
-                pair._request_duration = request_duration
-                pair._request_time = request_end
+                logger.debug(
+                    "Token fetch completed for %s:%s - %d pairs returned in %.2fms",
+                    chain_id,
+                    token_address,
+                    len(pairs),
+                    request_duration * 1000,
+                )
 
-            # Emit to all callbacks
-            for callback in self._token_subscriptions[key].copy():
-                try:
-                    if asyncio.iscoroutinefunction(callback):
-                        await callback(pairs)
-                    else:
-                        callback(pairs)
-                except Exception as e:
-                    logger.exception("Token callback error for %s:%s - %s", chain_id, token_address, type(e).__name__)
+                # Add timing info for debugging
+                for pair in pairs:
+                    pair._request_duration = request_duration
+                    pair._request_time = request_end
 
-        except Exception:
-            logger.exception("Token polling error for %s:%s", chain_id, token_address)
+                # Emit to all callbacks
+                for callback in self._token_subscriptions[key].copy():
+                    try:
+                        if asyncio.iscoroutinefunction(callback):
+                            await callback(pairs)
+                        else:
+                            callback(pairs)
+                    except Exception as e:
+                        logger.exception(
+                            "Token callback error for %s:%s - %s", chain_id, token_address, type(e).__name__
+                        )
+
+                # Success - break out of retry loop
+                break
+
+            except Exception as e:
+                retry_manager.record_failure(e)
+
+                if retry_manager.should_retry(e):
+                    logger.warning(
+                        "Token polling error for %s:%s (attempt %d/%d): %s. Retrying in %.2fs",
+                        chain_id,
+                        token_address,
+                        retry_manager.attempt,
+                        self.retry_config.max_retries + 1,
+                        str(e),
+                        retry_manager.calculate_delay(),
+                    )
+                    await retry_manager.wait_async()
+                    continue
+                else:
+                    # Max retries exceeded - log and continue to next poll cycle
+                    if isinstance(e, HttpError):
+                        logger.warning(
+                            "HTTP error during token fetch for %s:%s after %d attempts: %s. Will retry on next poll.",
+                            chain_id,
+                            token_address,
+                            retry_manager.attempt,
+                            e,
+                        )
+                    else:
+                        logger.exception(
+                            "Token polling failed for %s:%s after %d attempts: %s. Will retry on next poll.",
+                            chain_id,
+                            token_address,
+                            retry_manager.attempt,
+                            type(e).__name__,
+                        )
+                    break
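
The emit loop above supports both plain and coroutine callbacks: it iterates over a copy of the subscription set and awaits only coroutine functions, so one failing callback cannot abort the rest. The same pattern in isolation:

    import asyncio
    import logging
    from typing import Callable

    async def dispatch(callbacks: set[Callable], pairs: list) -> None:
        # Copy first so a callback can unsubscribe safely mid-iteration
        for cb in list(callbacks):
            try:
                if asyncio.iscoroutinefunction(cb):
                    await cb(pairs)  # coroutine function: must be awaited
                else:
                    cb(pairs)        # plain function: runs synchronously in the loop
            except Exception:
                logging.exception("callback error")  # one failing callback never stops the rest

The final hunk belongs to a second file, the utils package __init__ (judging by its relative imports), which re-exports the new logging, middleware, and retry helpers: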
@@ -1,4 +1,57 @@
 from .filters import FilterConfig, FilterPresets, TokenPairFilter
+from .logging_config import (
+    ContextualLogger,
+    StructuredFormatter,
+    generate_correlation_id,
+    get_contextual_logger,
+    get_correlation_id,
+    log_function_call,
+    set_correlation_id,
+    setup_structured_logging,
+    with_correlation_id,
+)
+from .middleware import (
+    CorrelationMiddleware,
+    RequestTracker,
+    auto_track_requests,
+    get_correlation_middleware,
+    get_request_tracker,
+    track_request,
+)
 from .ratelimit import RateLimiter
+from .retry import (
+    RetryConfig,
+    RetryError,
+    RetryManager,
+    RetryPresets,
+    retry_async,
+    retry_sync,
+)
 
-__all__ = ["FilterConfig", "FilterPresets", "RateLimiter", "TokenPairFilter"]
+__all__ = [
+    "ContextualLogger",
+    "CorrelationMiddleware",
+    "FilterConfig",
+    "FilterPresets",
+    "RateLimiter",
+    "RequestTracker",
+    "RetryConfig",
+    "RetryError",
+    "RetryManager",
+    "RetryPresets",
+    "StructuredFormatter",
+    "TokenPairFilter",
+    "auto_track_requests",
+    "generate_correlation_id",
+    "get_contextual_logger",
+    "get_correlation_id",
+    "get_correlation_middleware",
+    "get_request_tracker",
+    "log_function_call",
+    "retry_async",
+    "retry_sync",
+    "set_correlation_id",
+    "setup_structured_logging",
+    "track_request",
+    "with_correlation_id",
+]
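
With the expanded __all__, the new helpers become part of the public dexscreen.utils API. A short import sketch (call signatures beyond the exported names are assumptions):

    from dexscreen.utils import RetryConfig, get_contextual_logger, set_correlation_id

    logger = get_contextual_logger(__name__)   # same helper the streaming client uses
    set_correlation_id("req-1234")             # assumed: tags subsequent log records
    config = RetryConfig(max_retries=3)        # max_retries is referenced in the streaming diff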