dexscreen 0.0.2__py3-none-any.whl → 0.0.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dexscreen/__init__.py +87 -0
- dexscreen/api/client.py +275 -42
- dexscreen/core/exceptions.py +1067 -0
- dexscreen/core/http.py +859 -117
- dexscreen/core/validators.py +542 -0
- dexscreen/stream/polling.py +288 -78
- dexscreen/utils/__init__.py +54 -1
- dexscreen/utils/filters.py +182 -12
- dexscreen/utils/logging_config.py +421 -0
- dexscreen/utils/middleware.py +363 -0
- dexscreen/utils/ratelimit.py +212 -8
- dexscreen/utils/retry.py +357 -0
- {dexscreen-0.0.2.dist-info → dexscreen-0.0.5.dist-info}/METADATA +52 -1
- dexscreen-0.0.5.dist-info/RECORD +22 -0
- dexscreen-0.0.2.dist-info/RECORD +0 -17
- {dexscreen-0.0.2.dist-info → dexscreen-0.0.5.dist-info}/WHEEL +0 -0
- {dexscreen-0.0.2.dist-info → dexscreen-0.0.5.dist-info}/licenses/LICENSE +0 -0
dexscreen/utils/filters.py
CHANGED
@@ -7,6 +7,7 @@ from dataclasses import dataclass, field
|
|
7
7
|
from typing import Any, Optional
|
8
8
|
|
9
9
|
from ..core.models import TokenPair
|
10
|
+
from .logging_config import get_contextual_logger, with_correlation_id
|
10
11
|
|
11
12
|
|
12
13
|
@dataclass
|
@@ -37,6 +38,31 @@ class TokenPairFilter:
|
|
37
38
|
self._cache: dict[str, dict[str, Any]] = {}
|
38
39
|
self._last_update_times: dict[str, float] = {}
|
39
40
|
|
41
|
+
# Enhanced logging
|
42
|
+
self.contextual_logger = get_contextual_logger(__name__)
|
43
|
+
|
44
|
+
# Filter statistics
|
45
|
+
self.stats = {
|
46
|
+
"total_evaluations": 0,
|
47
|
+
"total_emissions": 0,
|
48
|
+
"rate_limited_blocks": 0,
|
49
|
+
"no_change_blocks": 0,
|
50
|
+
"insignificant_change_blocks": 0,
|
51
|
+
"cache_size": 0,
|
52
|
+
"emission_rate": 0.0,
|
53
|
+
}
|
54
|
+
|
55
|
+
init_context = {
|
56
|
+
"change_fields": self.config.change_fields,
|
57
|
+
"price_threshold": self.config.price_change_threshold,
|
58
|
+
"volume_threshold": self.config.volume_change_threshold,
|
59
|
+
"liquidity_threshold": self.config.liquidity_change_threshold,
|
60
|
+
"max_updates_per_second": self.config.max_updates_per_second,
|
61
|
+
}
|
62
|
+
|
63
|
+
self.contextual_logger.debug("TokenPairFilter initialized", context=init_context)
|
64
|
+
|
65
|
+
@with_correlation_id()
|
40
66
|
def should_emit(self, key: str, pair: TokenPair) -> bool:
|
41
67
|
"""
|
42
68
|
Check if update should be emitted based on filter rules.
|
@@ -48,21 +74,109 @@ class TokenPairFilter:
|
|
48
74
|
Returns:
|
49
75
|
True if update should be emitted, False otherwise
|
50
76
|
"""
|
51
|
-
|
52
|
-
|
53
|
-
|
77
|
+
self.stats["total_evaluations"] += 1
|
78
|
+
|
79
|
+
filter_context = {
|
80
|
+
"operation": "filter_evaluation",
|
81
|
+
"subscription_key": key[:32] + "..." if len(key) > 32 else key,
|
82
|
+
"pair_address": pair.pair_address[:10] + "..." if len(pair.pair_address) > 10 else pair.pair_address,
|
83
|
+
"chain_id": pair.chain_id,
|
84
|
+
"current_price": pair.price_usd,
|
85
|
+
}
|
86
|
+
|
87
|
+
try:
|
88
|
+
# Check rate limiting first
|
89
|
+
if not self._check_rate_limit(key):
|
90
|
+
self.stats["rate_limited_blocks"] += 1
|
91
|
+
filter_context.update(
|
92
|
+
{
|
93
|
+
"blocked_reason": "rate_limited",
|
94
|
+
"max_updates_per_second": self.config.max_updates_per_second,
|
95
|
+
}
|
96
|
+
)
|
97
|
+
|
98
|
+
self.contextual_logger.debug(
|
99
|
+
"Filter blocked update due to rate limiting for %s", key, context=filter_context
|
100
|
+
)
|
101
|
+
return False
|
54
102
|
|
55
|
-
|
56
|
-
|
57
|
-
|
103
|
+
# Check for changes
|
104
|
+
if not self._has_relevant_changes(key, pair):
|
105
|
+
self.stats["no_change_blocks"] += 1
|
106
|
+
filter_context.update(
|
107
|
+
{
|
108
|
+
"blocked_reason": "no_changes",
|
109
|
+
"monitored_fields": self.config.change_fields,
|
110
|
+
}
|
111
|
+
)
|
112
|
+
|
113
|
+
self.contextual_logger.debug(
|
114
|
+
"Filter blocked update - no relevant changes for %s", key, context=filter_context
|
115
|
+
)
|
116
|
+
return False
|
58
117
|
|
59
|
-
|
60
|
-
|
61
|
-
|
118
|
+
# Check if changes are significant enough
|
119
|
+
if not self._are_changes_significant(key, pair):
|
120
|
+
self.stats["insignificant_change_blocks"] += 1
|
121
|
+
filter_context.update(
|
122
|
+
{
|
123
|
+
"blocked_reason": "insignificant_changes",
|
124
|
+
"price_threshold": self.config.price_change_threshold,
|
125
|
+
"volume_threshold": self.config.volume_change_threshold,
|
126
|
+
"liquidity_threshold": self.config.liquidity_change_threshold,
|
127
|
+
}
|
128
|
+
)
|
129
|
+
|
130
|
+
self.contextual_logger.debug(
|
131
|
+
"Filter blocked update - changes not significant for %s", key, context=filter_context
|
132
|
+
)
|
133
|
+
return False
|
62
134
|
|
63
|
-
|
64
|
-
|
65
|
-
|
135
|
+
# Update cache and emit
|
136
|
+
self._update_cache(key, pair)
|
137
|
+
self.stats["total_emissions"] += 1
|
138
|
+
self.stats["cache_size"] = len(self._cache)
|
139
|
+
|
140
|
+
# Calculate emission rate
|
141
|
+
if self.stats["total_evaluations"] > 0:
|
142
|
+
self.stats["emission_rate"] = self.stats["total_emissions"] / self.stats["total_evaluations"]
|
143
|
+
|
144
|
+
filter_context.update(
|
145
|
+
{
|
146
|
+
"result": "emitted",
|
147
|
+
"total_emissions": self.stats["total_emissions"],
|
148
|
+
"emission_rate": self.stats["emission_rate"],
|
149
|
+
}
|
150
|
+
)
|
151
|
+
|
152
|
+
self.contextual_logger.debug(
|
153
|
+
"Filter allowing update emission for %s (emission #%d)",
|
154
|
+
key,
|
155
|
+
self.stats["total_emissions"],
|
156
|
+
context=filter_context,
|
157
|
+
)
|
158
|
+
|
159
|
+
return True
|
160
|
+
|
161
|
+
except Exception as e:
|
162
|
+
error_context = {
|
163
|
+
"operation": "filter_evaluation_error",
|
164
|
+
"subscription_key": key,
|
165
|
+
"error_type": type(e).__name__,
|
166
|
+
"error_message": str(e),
|
167
|
+
"pair_data": {
|
168
|
+
"address": pair.pair_address,
|
169
|
+
"chain": pair.chain_id,
|
170
|
+
"price": pair.price_usd,
|
171
|
+
},
|
172
|
+
}
|
173
|
+
|
174
|
+
self.contextual_logger.error(
|
175
|
+
"Error during filter evaluation for %s: %s", key, str(e), context=error_context, exc_info=True
|
176
|
+
)
|
177
|
+
|
178
|
+
# On error, default to allowing the emission to avoid blocking data
|
179
|
+
return True
|
66
180
|
|
67
181
|
def _check_rate_limit(self, key: str) -> bool:
|
68
182
|
"""Check if rate limit allows this update"""
|
@@ -166,12 +280,68 @@ class TokenPairFilter:
|
|
166
280
|
|
167
281
|
def reset(self, key: Optional[str] = None):
|
168
282
|
"""Reset filter state for a specific key or all keys"""
|
283
|
+
reset_context = {
|
284
|
+
"operation": "filter_reset",
|
285
|
+
"reset_scope": "single_key" if key else "all_keys",
|
286
|
+
"key": key if key else None,
|
287
|
+
"cache_size_before": len(self._cache),
|
288
|
+
}
|
289
|
+
|
169
290
|
if key:
|
170
291
|
self._cache.pop(key, None)
|
171
292
|
self._last_update_times.pop(key, None)
|
293
|
+
reset_context["cache_size_after"] = len(self._cache)
|
294
|
+
|
295
|
+
self.contextual_logger.debug("Filter state reset for key: %s", key, context=reset_context)
|
172
296
|
else:
|
173
297
|
self._cache.clear()
|
174
298
|
self._last_update_times.clear()
|
299
|
+
reset_context["cache_size_after"] = 0
|
300
|
+
|
301
|
+
self.contextual_logger.info("Filter state reset for all keys", context=reset_context)
|
302
|
+
|
303
|
+
def get_filter_stats(self) -> dict[str, Any]:
|
304
|
+
"""Get comprehensive filter statistics"""
|
305
|
+
total_blocks = (
|
306
|
+
self.stats["rate_limited_blocks"]
|
307
|
+
+ self.stats["no_change_blocks"]
|
308
|
+
+ self.stats["insignificant_change_blocks"]
|
309
|
+
)
|
310
|
+
|
311
|
+
stats = self.stats.copy()
|
312
|
+
stats.update(
|
313
|
+
{
|
314
|
+
"total_blocks": total_blocks,
|
315
|
+
"block_rate": total_blocks / max(1, self.stats["total_evaluations"]),
|
316
|
+
"cache_size": len(self._cache),
|
317
|
+
"tracked_subscriptions": len(self._last_update_times),
|
318
|
+
"config": {
|
319
|
+
"change_fields": self.config.change_fields,
|
320
|
+
"price_threshold": self.config.price_change_threshold,
|
321
|
+
"volume_threshold": self.config.volume_change_threshold,
|
322
|
+
"liquidity_threshold": self.config.liquidity_change_threshold,
|
323
|
+
"max_updates_per_second": self.config.max_updates_per_second,
|
324
|
+
},
|
325
|
+
}
|
326
|
+
)
|
327
|
+
|
328
|
+
return stats
|
329
|
+
|
330
|
+
def log_stats(self, operation: str = "filter_stats"):
|
331
|
+
"""Log current filter statistics"""
|
332
|
+
stats = self.get_filter_stats()
|
333
|
+
|
334
|
+
stats_context = {"operation": operation, **stats}
|
335
|
+
|
336
|
+
self.contextual_logger.info(
|
337
|
+
"Filter stats: %d evaluations, %d emissions (%.1f%%), %d blocks (%.1f%%)",
|
338
|
+
stats["total_evaluations"],
|
339
|
+
stats["total_emissions"],
|
340
|
+
stats["emission_rate"] * 100,
|
341
|
+
stats["total_blocks"],
|
342
|
+
stats["block_rate"] * 100,
|
343
|
+
context=stats_context,
|
344
|
+
)
|
175
345
|
|
176
346
|
|
177
347
|
# Preset configurations
|
@@ -0,0 +1,421 @@
|
|
1
|
+
"""
|
2
|
+
Enhanced logging utilities with correlation ID support and structured logging
|
3
|
+
"""
|
4
|
+
|
5
|
+
import logging
|
6
|
+
import uuid
|
7
|
+
from contextvars import ContextVar
|
8
|
+
from functools import wraps
|
9
|
+
from typing import Any, Callable, Optional, TypeVar
|
10
|
+
|
11
|
+
import orjson
|
12
|
+
|
13
|
+
# Context-local storage for the current correlation ID. A ContextVar is safe
# across threads and asyncio tasks: each execution context sees its own value.
correlation_id_context: ContextVar[Optional[str]] = ContextVar("correlation_id", default=None)

# Type variable so decorators in this module preserve the wrapped callable's
# signature for type checkers.
F = TypeVar("F", bound=Callable[..., Any])
18
|
+
|
19
|
+
|
20
|
+
class StructuredFormatter(logging.Formatter):
    """JSON log formatter that augments records with correlation IDs and context.

    Each record is rendered as a single orjson-encoded line holding timestamp,
    level, logger name, the rendered message, and — when available — the
    context-local correlation ID, an attached ``context`` dict, thread/process
    identifiers, exception text, and any ad-hoc ``extra=`` fields.
    """

    # LogRecord attributes that belong to the stdlib record itself and must
    # not be echoed back under the "extra" key.
    _RESERVED_ATTRS = frozenset(
        {
            "name",
            "msg",
            "args",
            "levelname",
            "levelno",
            "pathname",
            "filename",
            "module",
            "lineno",
            "funcName",
            "created",
            "msecs",
            "relativeCreated",
            "thread",
            "threadName",
            "processName",
            "process",
            "exc_info",
            "exc_text",
            "stack_info",
            "getMessage",
            "context",
        }
    )

    def __init__(self, include_correlation_id: bool = True, include_context: bool = True):
        """Create the formatter.

        Args:
            include_correlation_id: Include the context-local correlation ID.
            include_context: Include the record's attached ``context`` dict.
        """
        super().__init__()
        self.include_correlation_id = include_correlation_id
        self.include_context = include_context

    def format(self, record: logging.LogRecord) -> str:
        """Render *record* as a structured JSON string."""
        payload: dict[str, Any] = {
            "timestamp": self.formatTime(record),
            "level": record.levelname,
            "logger": record.name,
            "message": record.getMessage(),
        }

        if self.include_correlation_id:
            cid = correlation_id_context.get()
            if cid:
                payload["correlation_id"] = cid

        # Thread/process identifiers help correlate interleaved log lines.
        if record.thread:
            payload["thread_id"] = record.thread
        if record.process:
            payload["process_id"] = record.process

        if record.exc_info:
            payload["exception"] = self.formatException(record.exc_info)

        if self.include_context and hasattr(record, "context"):
            payload["context"] = record.context  # type: ignore[attr-defined]

        # Anything callers passed via logging's ``extra=`` lands on the record
        # as ad-hoc attributes; surface those under a dedicated key.
        leftovers = {
            attr: value for attr, value in record.__dict__.items() if attr not in self._RESERVED_ATTRS
        }
        if leftovers:
            payload["extra"] = leftovers

        try:
            return orjson.dumps(payload, option=orjson.OPT_APPEND_NEWLINE).decode()
        except (TypeError, ValueError):
            # orjson cannot serialize arbitrary objects; degrade gracefully.
            return str(payload)
106
|
+
|
107
|
+
|
108
|
+
class ContextualLogger:
    """Thin wrapper around :class:`logging.Logger` that threads a ``context``
    dict onto each emitted record (picked up by ``StructuredFormatter``).
    """

    def __init__(self, logger: logging.Logger):
        """Wrap *logger*; every log call delegates to it."""
        self.logger = logger

    def _log_with_context(self, level: int, msg: str, *args, context: Optional[dict[str, Any]] = None, **kwargs):
        """Delegate to the wrapped logger, attaching *context* via ``extra``."""
        if context:
            # The record picks this up as a plain attribute named "context".
            kwargs.setdefault("extra", {})["context"] = context
        self.logger.log(level, msg, *args, **kwargs)

    def debug(self, msg: str, *args, context: Optional[dict[str, Any]] = None, **kwargs):
        """Log at DEBUG with optional structured context."""
        self._log_with_context(logging.DEBUG, msg, *args, context=context, **kwargs)

    def info(self, msg: str, *args, context: Optional[dict[str, Any]] = None, **kwargs):
        """Log at INFO with optional structured context."""
        self._log_with_context(logging.INFO, msg, *args, context=context, **kwargs)

    def warning(self, msg: str, *args, context: Optional[dict[str, Any]] = None, **kwargs):
        """Log at WARNING with optional structured context."""
        self._log_with_context(logging.WARNING, msg, *args, context=context, **kwargs)

    def error(self, msg: str, *args, context: Optional[dict[str, Any]] = None, **kwargs):
        """Log at ERROR with optional structured context."""
        self._log_with_context(logging.ERROR, msg, *args, context=context, **kwargs)

    def critical(self, msg: str, *args, context: Optional[dict[str, Any]] = None, **kwargs):
        """Log at CRITICAL with optional structured context."""
        self._log_with_context(logging.CRITICAL, msg, *args, context=context, **kwargs)

    def exception(self, msg: str, *args, context: Optional[dict[str, Any]] = None, **kwargs):
        """Log at ERROR with exception info attached (use inside ``except``)."""
        kwargs["exc_info"] = True
        self._log_with_context(logging.ERROR, msg, *args, context=context, **kwargs)
156
|
+
|
157
|
+
|
158
|
+
def generate_correlation_id() -> str:
    """Return a fresh random correlation ID (a UUID4 in canonical string form)."""
    return f"{uuid.uuid4()}"
161
|
+
|
162
|
+
|
163
|
+
def set_correlation_id(correlation_id: str) -> None:
    """Bind *correlation_id* to the current execution context.

    The value is stored in a ContextVar, so it is isolated per thread and
    per asyncio task.
    """
    correlation_id_context.set(correlation_id)
166
|
+
|
167
|
+
|
168
|
+
def get_correlation_id() -> Optional[str]:
    """Return the correlation ID bound to the current context, or None if unset."""
    return correlation_id_context.get()
171
|
+
|
172
|
+
|
173
|
+
def with_correlation_id(correlation_id: Optional[str] = None) -> Callable[[F], F]:
    """Decorator that pins a correlation ID for the duration of each call.

    A fixed *correlation_id* may be supplied; otherwise a fresh one is
    generated per invocation. The previous context value is always restored
    on exit, even when the wrapped callable raises.

    Args:
        correlation_id: Specific correlation ID to use, or None to generate one

    Returns:
        Decorated function (coroutine functions get an async wrapper)
    """

    def decorator(func: F) -> F:
        import asyncio

        if asyncio.iscoroutinefunction(func):

            @wraps(func)
            async def async_wrapper(*args, **kwargs):
                # Bind the ID, run the coroutine, then restore the old value.
                token = correlation_id_context.set(correlation_id or generate_correlation_id())
                try:
                    return await func(*args, **kwargs)
                finally:
                    correlation_id_context.reset(token)

            return async_wrapper  # type: ignore

        @wraps(func)
        def sync_wrapper(*args, **kwargs):
            # Bind the ID, run the function, then restore the old value.
            token = correlation_id_context.set(correlation_id or generate_correlation_id())
            try:
                return func(*args, **kwargs)
            finally:
                correlation_id_context.reset(token)

        return sync_wrapper  # type: ignore

    return decorator
220
|
+
|
221
|
+
|
222
|
+
def setup_structured_logging(
    level: int = logging.INFO,
    use_structured_format: bool = True,
    include_correlation_id: bool = True,
    include_context: bool = True,
) -> None:
    """Configure the root logger with a single console handler.

    Any previously installed root handlers are removed first, so repeated
    calls replace rather than stack handlers.

    Args:
        level: Minimum level for both the root logger and the handler.
        use_structured_format: Emit JSON via StructuredFormatter when True,
            otherwise a plain human-readable format.
        include_correlation_id: Forwarded to StructuredFormatter.
        include_context: Forwarded to StructuredFormatter.
    """
    root = logging.getLogger()
    root.setLevel(level)

    # Replace, don't stack: drop whatever handlers were installed before.
    while root.handlers:
        root.removeHandler(root.handlers[0])

    formatter: logging.Formatter
    if use_structured_format:
        formatter = StructuredFormatter(
            include_correlation_id=include_correlation_id, include_context=include_context
        )
    else:
        formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")

    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(level)
    stream_handler.setFormatter(formatter)
    root.addHandler(stream_handler)
257
|
+
|
258
|
+
|
259
|
+
def get_contextual_logger(name: str) -> ContextualLogger:
    """Return a ContextualLogger wrapping ``logging.getLogger(name)``.

    Args:
        name: Logger name, typically the caller's ``__name__``.

    Returns:
        A context-aware logger delegating to the named stdlib logger.
    """
    return ContextualLogger(logging.getLogger(name))
271
|
+
|
272
|
+
|
273
|
+
def log_function_call(
    logger: Optional[ContextualLogger] = None,
    log_args: bool = False,
    log_result: bool = False,
    log_level: int = logging.DEBUG,
    mask_sensitive: bool = True,
) -> Callable[[F], F]:
    """
    Decorator to log function calls with context.

    Wraps sync and async callables alike; entry and successful exit are
    logged at *log_level*, failures are logged at ERROR with ``exc_info``
    and then re-raised.

    Args:
        logger: Logger to use (defaults to logger for function's module)
        log_args: Whether to log function arguments
        log_result: Whether to log function result
        log_level: Logging level to use
        mask_sensitive: Whether to mask sensitive data in logs

    Returns:
        Decorated function
    """

    def decorator(func: F) -> F:
        # Get logger if not provided
        func_logger = logger or get_contextual_logger(func.__module__)

        @wraps(func)
        def wrapper(*args, **kwargs):
            function_name = func.__name__
            context: dict[str, Any] = {"function": function_name, "module": func.__module__}

            # Log function arguments if requested
            if log_args:
                safe_args = _mask_sensitive_data(args) if mask_sensitive else args
                safe_kwargs = _mask_sensitive_data(kwargs) if mask_sensitive else kwargs
                context["args"] = safe_args
                context["kwargs"] = safe_kwargs

            func_logger._log_with_context(log_level, "Function %s called", function_name, context=context)

            try:
                result = func(*args, **kwargs)

                # Log result if requested
                # NOTE(review): this mutates the same context dict attached to
                # the "called" record above — fine for synchronous handlers.
                if log_result:
                    safe_result = _mask_sensitive_data(result) if mask_sensitive else result
                    context["result"] = safe_result

                func_logger._log_with_context(
                    log_level, "Function %s completed successfully", function_name, context=context
                )

                return result

            except Exception as e:
                # Copy so failure details don't leak into the entry record's context.
                error_context = context.copy()
                error_context.update({"error_type": type(e).__name__, "error_message": str(e)})

                func_logger._log_with_context(
                    logging.ERROR,
                    "Function %s failed with %s: %s",
                    function_name,
                    type(e).__name__,
                    str(e),
                    context=error_context,
                    exc_info=True,
                )
                raise

        @wraps(func)
        async def async_wrapper(*args, **kwargs):
            function_name = func.__name__
            context: dict[str, Any] = {"function": function_name, "module": func.__module__}

            # Log function arguments if requested
            if log_args:
                safe_args = _mask_sensitive_data(args) if mask_sensitive else args
                safe_kwargs = _mask_sensitive_data(kwargs) if mask_sensitive else kwargs
                context["args"] = safe_args
                context["kwargs"] = safe_kwargs

            func_logger._log_with_context(log_level, "Async function %s called", function_name, context=context)

            try:
                result = await func(*args, **kwargs)

                # Log result if requested
                if log_result:
                    safe_result = _mask_sensitive_data(result) if mask_sensitive else result
                    context["result"] = safe_result

                func_logger._log_with_context(
                    log_level, "Async function %s completed successfully", function_name, context=context
                )

                return result

            except Exception as e:
                error_context = context.copy()
                error_context.update({"error_type": type(e).__name__, "error_message": str(e)})

                func_logger._log_with_context(
                    logging.ERROR,
                    "Async function %s failed with %s: %s",
                    function_name,
                    type(e).__name__,
                    str(e),
                    context=error_context,
                    exc_info=True,
                )
                raise

        # Return appropriate wrapper based on function type
        import asyncio

        if asyncio.iscoroutinefunction(func):
            return async_wrapper  # type: ignore
        else:
            return wrapper  # type: ignore

    return decorator
393
|
+
|
394
|
+
|
395
|
+
def _mask_sensitive_data(data: Any) -> Any:
    """
    Mask sensitive data in logs to prevent credential leakage.

    Dict values whose (string) key contains a sensitive word are replaced
    wholesale; lists/tuples are walked recursively; long strings are
    partially redacted because they may hold tokens.

    Args:
        data: Data to mask

    Returns:
        Masked data (same shape/type as input)
    """
    if isinstance(data, dict):
        masked = {}
        for key, value in data.items():
            # Dict keys are not necessarily strings (ints, tuples, ...);
            # only string keys can match the sensitive-name heuristic.
            # The original `key.lower()` raised AttributeError otherwise.
            key_name = key.lower() if isinstance(key, str) else ""
            if any(
                sensitive in key_name for sensitive in ["password", "token", "secret", "key", "auth", "credential"]
            ):
                masked[key] = "***MASKED***"
            else:
                masked[key] = _mask_sensitive_data(value)
        return masked
    elif isinstance(data, (list, tuple)):
        return type(data)(_mask_sensitive_data(item) for item in data)
    elif isinstance(data, str) and len(data) > 20:
        # Mask long strings that might contain tokens
        return f"{data[:8]}***MASKED***{data[-4:]}"
    else:
        return data