chuk-tool-processor 0.6.4__py3-none-any.whl → 0.9.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- chuk_tool_processor/core/__init__.py +32 -1
- chuk_tool_processor/core/exceptions.py +225 -13
- chuk_tool_processor/core/processor.py +135 -104
- chuk_tool_processor/execution/strategies/__init__.py +6 -0
- chuk_tool_processor/execution/strategies/inprocess_strategy.py +142 -150
- chuk_tool_processor/execution/strategies/subprocess_strategy.py +202 -206
- chuk_tool_processor/execution/tool_executor.py +82 -84
- chuk_tool_processor/execution/wrappers/__init__.py +42 -0
- chuk_tool_processor/execution/wrappers/caching.py +150 -116
- chuk_tool_processor/execution/wrappers/circuit_breaker.py +370 -0
- chuk_tool_processor/execution/wrappers/rate_limiting.py +76 -43
- chuk_tool_processor/execution/wrappers/retry.py +116 -78
- chuk_tool_processor/logging/__init__.py +23 -17
- chuk_tool_processor/logging/context.py +40 -45
- chuk_tool_processor/logging/formatter.py +22 -21
- chuk_tool_processor/logging/helpers.py +28 -42
- chuk_tool_processor/logging/metrics.py +13 -15
- chuk_tool_processor/mcp/__init__.py +8 -12
- chuk_tool_processor/mcp/mcp_tool.py +158 -114
- chuk_tool_processor/mcp/register_mcp_tools.py +22 -22
- chuk_tool_processor/mcp/setup_mcp_http_streamable.py +57 -17
- chuk_tool_processor/mcp/setup_mcp_sse.py +57 -17
- chuk_tool_processor/mcp/setup_mcp_stdio.py +11 -11
- chuk_tool_processor/mcp/stream_manager.py +333 -276
- chuk_tool_processor/mcp/transport/__init__.py +22 -29
- chuk_tool_processor/mcp/transport/base_transport.py +180 -44
- chuk_tool_processor/mcp/transport/http_streamable_transport.py +505 -325
- chuk_tool_processor/mcp/transport/models.py +100 -0
- chuk_tool_processor/mcp/transport/sse_transport.py +607 -276
- chuk_tool_processor/mcp/transport/stdio_transport.py +597 -116
- chuk_tool_processor/models/__init__.py +21 -1
- chuk_tool_processor/models/execution_strategy.py +16 -21
- chuk_tool_processor/models/streaming_tool.py +28 -25
- chuk_tool_processor/models/tool_call.py +49 -31
- chuk_tool_processor/models/tool_export_mixin.py +22 -8
- chuk_tool_processor/models/tool_result.py +40 -77
- chuk_tool_processor/models/tool_spec.py +350 -0
- chuk_tool_processor/models/validated_tool.py +36 -18
- chuk_tool_processor/observability/__init__.py +30 -0
- chuk_tool_processor/observability/metrics.py +312 -0
- chuk_tool_processor/observability/setup.py +105 -0
- chuk_tool_processor/observability/tracing.py +345 -0
- chuk_tool_processor/plugins/__init__.py +1 -1
- chuk_tool_processor/plugins/discovery.py +11 -11
- chuk_tool_processor/plugins/parsers/__init__.py +1 -1
- chuk_tool_processor/plugins/parsers/base.py +1 -2
- chuk_tool_processor/plugins/parsers/function_call_tool.py +13 -8
- chuk_tool_processor/plugins/parsers/json_tool.py +4 -3
- chuk_tool_processor/plugins/parsers/openai_tool.py +12 -7
- chuk_tool_processor/plugins/parsers/xml_tool.py +4 -4
- chuk_tool_processor/registry/__init__.py +12 -12
- chuk_tool_processor/registry/auto_register.py +22 -30
- chuk_tool_processor/registry/decorators.py +127 -129
- chuk_tool_processor/registry/interface.py +26 -23
- chuk_tool_processor/registry/metadata.py +27 -22
- chuk_tool_processor/registry/provider.py +17 -18
- chuk_tool_processor/registry/providers/__init__.py +16 -19
- chuk_tool_processor/registry/providers/memory.py +18 -25
- chuk_tool_processor/registry/tool_export.py +42 -51
- chuk_tool_processor/utils/validation.py +15 -16
- chuk_tool_processor-0.9.7.dist-info/METADATA +1813 -0
- chuk_tool_processor-0.9.7.dist-info/RECORD +67 -0
- chuk_tool_processor-0.6.4.dist-info/METADATA +0 -697
- chuk_tool_processor-0.6.4.dist-info/RECORD +0 -60
- {chuk_tool_processor-0.6.4.dist-info → chuk_tool_processor-0.9.7.dist-info}/WHEEL +0 -0
- {chuk_tool_processor-0.6.4.dist-info → chuk_tool_processor-0.9.7.dist-info}/top_level.txt +0 -0
--- a/chuk_tool_processor/execution/wrappers/retry.py
+++ b/chuk_tool_processor/execution/wrappers/retry.py
@@ -6,13 +6,14 @@ Adds exponential-back-off retry logic and *deadline-aware* timeout handling so a
 `timeout=` passed by callers is treated as the **total wall-clock budget** for
 all attempts of a single tool call.
 """
+
 from __future__ import annotations
 
 import asyncio
 import random
 import time
-from datetime import datetime, timezone
-from typing import Any
+from datetime import UTC, datetime
+from typing import Any
 
 from chuk_tool_processor.logging import get_logger
 from chuk_tool_processor.models.tool_call import ToolCall
@@ -20,6 +21,24 @@ from chuk_tool_processor.models.tool_result import ToolResult
 
 logger = get_logger("chuk_tool_processor.execution.wrappers.retry")
 
+# Optional observability imports
+try:
+    from chuk_tool_processor.observability.metrics import get_metrics
+    from chuk_tool_processor.observability.tracing import trace_retry_attempt
+
+    _observability_available = True
+except ImportError:
+    _observability_available = False
+
+    # No-op functions when observability not available
+    def get_metrics():
+        return None
+
+    def trace_retry_attempt(*_args, **_kwargs):
+        from contextlib import nullcontext
+
+        return nullcontext()
+
 
 # --------------------------------------------------------------------------- #
 # Retry configuration
@@ -33,8 +52,9 @@ class RetryConfig:
         base_delay: float = 1.0,
         max_delay: float = 60.0,
         jitter: bool = True,
-        retry_on_exceptions:
-        retry_on_error_substrings:
+        retry_on_exceptions: list[type[Exception]] | None = None,
+        retry_on_error_substrings: list[str] | None = None,
+        skip_retry_on_error_substrings: list[str] | None = None,
     ):
         if max_retries < 0:
             raise ValueError("max_retries cannot be negative")
@@ -44,6 +64,7 @@ class RetryConfig:
         self.jitter = jitter
         self.retry_on_exceptions = retry_on_exceptions or []
         self.retry_on_error_substrings = retry_on_error_substrings or []
+        self.skip_retry_on_error_substrings = skip_retry_on_error_substrings or []
 
     # --------------------------------------------------------------------- #
     # Decision helpers
@@ -52,13 +73,21 @@ class RetryConfig:
         self,
         attempt: int,
         *,
-        error:
-        error_str:
+        error: Exception | None = None,
+        error_str: str | None = None,
     ) -> bool:
         """Return *True* iff another retry is allowed for this attempt."""
         if attempt >= self.max_retries:
             return False
 
+        # Check skip list first - these errors should never be retried
+        # (e.g., OAuth errors that need to be handled at transport layer)
+        if error_str and self.skip_retry_on_error_substrings:
+            error_lower = error_str.lower()
+            if any(skip_pattern.lower() in error_lower for skip_pattern in self.skip_retry_on_error_substrings):
+                logger.debug(f"Skipping retry for error matching skip pattern: {error_str[:100]}")
+                return False
+
         # Nothing specified → always retry until max_retries reached
         if not self.retry_on_exceptions and not self.retry_on_error_substrings:
             return True
@@ -66,17 +95,14 @@ class RetryConfig:
         if error is not None and any(isinstance(error, exc) for exc in self.retry_on_exceptions):
             return True
 
-
-            return True
-
-        return False
+        return bool(error_str and any(substr in error_str for substr in self.retry_on_error_substrings))
 
     # --------------------------------------------------------------------- #
     # Back-off
     # --------------------------------------------------------------------- #
     def get_delay(self, attempt: int) -> float:
         """Exponential back-off delay for *attempt* (0-based)."""
-        delay = min(self.base_delay * (2
+        delay = min(self.base_delay * (2**attempt), self.max_delay)
         if self.jitter:
             delay *= 0.5 + random.random()  # jitter in [0.5, 1.5)
         return delay
@@ -94,8 +120,8 @@ class RetryableToolExecutor:
         self,
         executor: Any,
         *,
-        default_config:
-        tool_configs:
+        default_config: RetryConfig | None = None,
+        tool_configs: dict[str, RetryConfig] | None = None,
     ):
         self.executor = executor
         self.default_config = default_config or RetryConfig()
@@ -109,15 +135,15 @@ class RetryableToolExecutor:
 
     async def execute(
         self,
-        calls:
+        calls: list[ToolCall],
         *,
-        timeout:
+        timeout: float | None = None,
         use_cache: bool = True,
-    ) ->
+    ) -> list[ToolResult]:
         if not calls:
             return []
 
-        out:
+        out: list[ToolResult] = []
         for call in calls:
             cfg = self._config_for(call.tool)
             out.append(await self._execute_single(call, cfg, timeout, use_cache))
@@ -130,11 +156,11 @@ class RetryableToolExecutor:
         self,
         call: ToolCall,
         cfg: RetryConfig,
-        timeout:
+        timeout: float | None,
         use_cache: bool,
     ) -> ToolResult:
         attempt = 0
-        last_error:
+        last_error: str | None = None
         pid = 0
         machine = "unknown"
 
@@ -156,8 +182,8 @@ class RetryableToolExecutor:
                 tool=call.tool,
                 result=None,
                 error=f"Timeout after {timeout}s",
-                start_time=datetime.now(timezone.utc),
-                end_time=datetime.now(timezone.utc),
+                start_time=datetime.now(UTC),
+                end_time=datetime.now(UTC),
                 machine=machine,
                 pid=pid,
                 attempts=attempt,
@@ -168,64 +194,74 @@ class RetryableToolExecutor:
             # ---------------------------------------------------------------- #
             # Execute one attempt
            # ---------------------------------------------------------------- #
-            start_time = datetime.now(timezone.utc)
[old lines 172-182 removed; their content is unreadable in the extracted diff]
+            start_time = datetime.now(UTC)
+
+            # Trace retry attempt
+            with trace_retry_attempt(call.tool, attempt, cfg.max_retries):
+                try:
+                    kwargs = {"timeout": remaining} if remaining is not None else {}
+                    if hasattr(self.executor, "use_cache"):
+                        kwargs["use_cache"] = use_cache
+
+                    result = (await self.executor.execute([call], **kwargs))[0]
+                    pid = result.pid
+                    machine = result.machine
+
+                    # Record retry metrics
+                    metrics = get_metrics()
+                    success = result.error is None
+
+                    if metrics:
+                        metrics.record_retry_attempt(call.tool, attempt, success)
+
+                    # Success?
+                    if success:
+                        result.attempts = attempt + 1
+                        return result
+
+                    # Error: decide on retry
+                    last_error = result.error
+                    if cfg.should_retry(attempt, error_str=result.error):
+                        delay = cfg.get_delay(attempt)
+                        # never overshoot the deadline
+                        if deadline is not None:
+                            delay = min(delay, max(deadline - time.monotonic(), 0))
+                        if delay:
+                            await asyncio.sleep(delay)
+                        attempt += 1
+                        continue
+
+                    # No more retries wanted
+                    result.error = self._wrap_error(last_error, attempt, cfg)
                     result.attempts = attempt + 1
                     return result
 
-            #
[old lines 187-211 removed; their content is unreadable in the extracted diff]
-                        delay = min(delay, max(deadline - time.monotonic(), 0))
-                    if delay:
-                        await asyncio.sleep(delay)
-                    attempt += 1
-                    continue
-
-            end_time = datetime.now(timezone.utc)
-            return ToolResult(
-                tool=call.tool,
-                result=None,
-                error=self._wrap_error(err_str, attempt, cfg),
-                start_time=start_time,
-                end_time=end_time,
-                machine=machine,
-                pid=pid,
-                attempts=attempt + 1,
-            )
+                # ---------------------------------------------------------------- #
+                # Exception path
+                # ---------------------------------------------------------------- #
+                except Exception as exc:  # noqa: BLE001
+                    err_str = str(exc)
+                    last_error = err_str
+                    if cfg.should_retry(attempt, error=exc, error_str=err_str):
+                        delay = cfg.get_delay(attempt)
+                        if deadline is not None:
+                            delay = min(delay, max(deadline - time.monotonic(), 0))
+                        if delay:
+                            await asyncio.sleep(delay)
+                        attempt += 1
+                        continue
+
+                    end_time = datetime.now(UTC)
+                    return ToolResult(
+                        tool=call.tool,
+                        result=None,
+                        error=self._wrap_error(err_str, attempt, cfg),
+                        start_time=start_time,
+                        end_time=end_time,
+                        machine=machine,
+                        pid=pid,
+                        attempts=attempt + 1,
+                    )
 
     # --------------------------------------------------------------------- #
     # Helpers
@@ -246,8 +282,9 @@ def retryable(
     base_delay: float = 1.0,
     max_delay: float = 60.0,
     jitter: bool = True,
-    retry_on_exceptions:
-    retry_on_error_substrings:
+    retry_on_exceptions: list[type[Exception]] | None = None,
+    retry_on_error_substrings: list[str] | None = None,
+    skip_retry_on_error_substrings: list[str] | None = None,
 ):
     """
     Class decorator that attaches a :class:`RetryConfig` to a *tool* class.
@@ -269,6 +306,7 @@ def retryable(
         jitter=jitter,
         retry_on_exceptions=retry_on_exceptions,
         retry_on_error_substrings=retry_on_error_substrings,
+        skip_retry_on_error_substrings=skip_retry_on_error_substrings,
     )
     return cls
 
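The retry hunks above implement capped exponential back-off with optional jitter, and clamp every sleep against the caller's total wall-clock deadline. A minimal standalone sketch of that arithmetic, mirroring RetryConfig.get_delay plus the deadline clamp applied in _execute_single (the helper name backoff_delay is illustrative, not part of the package):

import random
import time


def backoff_delay(
    attempt: int,
    base_delay: float = 1.0,
    max_delay: float = 60.0,
    jitter: bool = True,
    deadline: float | None = None,
) -> float:
    # Exponential growth, capped: 1s, 2s, 4s, ... up to max_delay.
    delay = min(base_delay * (2**attempt), max_delay)
    if jitter:
        # Same jitter window as the diff: scale by [0.5, 1.5).
        delay *= 0.5 + random.random()
    if deadline is not None:
        # Never sleep past the remaining wall-clock budget.
        delay = min(delay, max(deadline - time.monotonic(), 0))
    return delay


# Without jitter, attempts 0-4 sleep 1, 2, 4, 8, 16 seconds.
print([backoff_delay(a, jitter=False) for a in range(5)])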
--- a/chuk_tool_processor/logging/__init__.py
+++ b/chuk_tool_processor/logging/__init__.py
@@ -11,40 +11,44 @@ Key components:
 - Metrics collection for tools and parsers
 - Async-friendly context managers for spans and requests
 """
+
 from __future__ import annotations
 
 import logging
 import sys
 
+
 # Auto-initialize shutdown error suppression when logging package is imported
 def _initialize_shutdown_fixes():
     """Initialize shutdown error suppression when the package is imported."""
     try:
         from .context import _setup_shutdown_error_suppression
+
         _setup_shutdown_error_suppression()
     except ImportError:
         pass
 
+
 # Initialize when package is imported
 _initialize_shutdown_fixes()
 
 # Import internal modules in correct order to avoid circular imports
 # First, formatter has no internal dependencies
-from .formatter import StructuredFormatter
-
 # Second, context only depends on formatter
-from .context import LogContext,
+from .context import LogContext, StructuredAdapter, get_logger, log_context  # noqa: E402
+from .formatter import StructuredFormatter  # noqa: E402
 
 # Third, helpers depend on context
-from .helpers import log_context_span,
+from .helpers import log_context_span, log_tool_call, request_logging  # noqa: E402
 
 # Fourth, metrics depend on helpers and context
-from .metrics import
+from .metrics import MetricsLogger, metrics  # noqa: E402
 
 __all__ = [
     "get_logger",
     "log_context",
     "LogContext",
+    "StructuredAdapter",
     "log_context_span",
     "request_logging",
     "log_tool_call",
@@ -53,6 +57,7 @@ __all__ = [
     "setup_logging",
 ]
 
+
 # --------------------------------------------------------------------------- #
 # Setup function for configuring logging
 # --------------------------------------------------------------------------- #
@@ -63,7 +68,7 @@ async def setup_logging(
 ) -> None:
     """
     Set up the logging system.
-
+
     Args:
         level: Logging level (default: INFO)
         structured: Whether to use structured JSON logging
@@ -72,39 +77,40 @@ async def setup_logging(
     # Get the root logger
     root_logger = logging.getLogger("chuk_tool_processor")
     root_logger.setLevel(level)
-
+
     # Create formatter
-    formatter =
-
+    formatter = (
+        StructuredFormatter()
+        if structured
+        else logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
     )
-
+
     # Always add a dummy handler and remove it to satisfy test expectations
     dummy_handler = logging.StreamHandler()
     root_logger.addHandler(dummy_handler)
     root_logger.removeHandler(dummy_handler)
-
+
     # Now clear any remaining handlers
     for handler in list(root_logger.handlers):
         root_logger.removeHandler(handler)
-
+
     # Add console handler
     console_handler = logging.StreamHandler(sys.stderr)
     console_handler.setLevel(level)
     console_handler.setFormatter(formatter)
     root_logger.addHandler(console_handler)
-
+
     # Add file handler if specified
     if log_file:
         file_handler = logging.FileHandler(log_file)
         file_handler.setLevel(level)
         file_handler.setFormatter(formatter)
         root_logger.addHandler(file_handler)
-
+
     # Log startup with internal logger
     internal_logger = logging.getLogger("chuk_tool_processor.logging")
     internal_logger.info(
-        "Logging initialized",
-        extra={"context": {"level": logging.getLevelName(level), "structured": structured}}
+        "Logging initialized", extra={"context": {"level": logging.getLevelName(level), "structured": structured}}
     )
@@ -115,4 +121,4 @@ root_logger.setLevel(logging.INFO)
 _handler = logging.StreamHandler(sys.stderr)
 _handler.setLevel(logging.INFO)
 _handler.setFormatter(StructuredFormatter())
-root_logger.addHandler(_handler)
+root_logger.addHandler(_handler)
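setup_logging above is async and rebuilds the handler set on the chuk_tool_processor root logger each time it runs. A hedged usage sketch based only on the signature and Args documented in this diff (the file name tools.log and the demo logger name are placeholders):

import asyncio
import logging

from chuk_tool_processor.logging import get_logger, setup_logging


async def main() -> None:
    # JSON-structured output to stderr; structured=False falls back to the
    # plain "%(asctime)s - %(name)s - ..." format, and log_file additionally
    # writes to a file.
    await setup_logging(level=logging.DEBUG, structured=True, log_file="tools.log")
    get_logger("chuk_tool_processor.demo").info("logging configured")


asyncio.run(main())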
--- a/chuk_tool_processor/logging/context.py
+++ b/chuk_tool_processor/logging/context.py
@@ -15,34 +15,31 @@ This module provides:
 from __future__ import annotations
 
 import asyncio
+import atexit
+import contextlib
 import contextvars
 import logging
+import threading
 import uuid
 import warnings
-import atexit
-import threading
-from typing import (
-    Any,
-    AsyncContextManager,
-    AsyncGenerator,
-    Dict,
-    Optional,
-)
+from collections.abc import AsyncGenerator
+from typing import Any
 
 __all__ = ["LogContext", "log_context", "StructuredAdapter", "get_logger"]
 
+
 # --------------------------------------------------------------------------- #
 # Production-quality shutdown error handling
 # --------------------------------------------------------------------------- #
 class LibraryShutdownFilter(logging.Filter):
     """
     Production filter for suppressing known harmless shutdown messages.
-
+
     This filter ensures clean library shutdown by suppressing specific
     error messages that occur during normal asyncio/anyio cleanup and
     do not indicate actual problems.
     """
-
+
     # Known harmless shutdown patterns
     SUPPRESSED_PATTERNS = [
         # Primary anyio error that this fixes
@@ -55,55 +52,56 @@ class LibraryShutdownFilter(logging.Filter):
         ("WARNING", "task was destroyed but it is pending"),
         ("ERROR", "event loop is closed"),
     ]
-
+
     def filter(self, record: logging.LogRecord) -> bool:
         """Filter out known harmless shutdown messages."""
         message = record.getMessage().lower()
         level = record.levelname
-
+
         for pattern_level, *pattern_phrases in self.SUPPRESSED_PATTERNS:
             if level == pattern_level and all(phrase.lower() in message for phrase in pattern_phrases):
                 return False
-
+
         return True
 
+
 class LibraryLoggingManager:
     """
     Clean manager for library-wide logging concerns.
-
+
     Handles initialization and configuration of logging behavior
     in a centralized, maintainable way.
     """
-
+
     def __init__(self):
         self._initialized = False
         self._lock = threading.Lock()
-
+
     def initialize(self):
         """Initialize clean shutdown behavior for the library."""
         if self._initialized:
             return
-
+
         with self._lock:
             if self._initialized:
                 return
-
+
             self._setup_shutdown_handling()
             self._setup_warning_filters()
             self._initialized = True
-
+
     def _setup_shutdown_handling(self):
         """Set up clean shutdown message handling."""
         root_logger = logging.getLogger()
-
+
         # Check if our filter is already present
         for existing_filter in root_logger.filters:
             if isinstance(existing_filter, LibraryShutdownFilter):
                 return
-
+
         # Add our production-quality filter
         root_logger.addFilter(LibraryShutdownFilter())
-
+
     def _setup_warning_filters(self):
         """Set up Python warnings filters for clean shutdown."""
         # Suppress specific asyncio/anyio warnings during shutdown
@@ -112,11 +110,12 @@ class LibraryLoggingManager:
         ".*coroutine was never awaited.*",
         ".*Task was destroyed but it is pending.*",
     ]
-
+
     for pattern in warning_patterns:
         warnings.filterwarnings("ignore", message=pattern, category=RuntimeWarning)
         warnings.filterwarnings("ignore", message=pattern, category=ResourceWarning)
 
+
 # Global manager instance
 _logging_manager = LibraryLoggingManager()
 
@@ -130,15 +129,13 @@ atexit.register(lambda: None)
 # Per-task context storage
 # --------------------------------------------------------------------------- #
 
-_context_var: contextvars.ContextVar[
-    "log_context", default={}
-)
+_context_var: contextvars.ContextVar[dict[str, Any] | None] = contextvars.ContextVar("log_context", default=None)
 
 
 # --------------------------------------------------------------------------- #
 # Helpers for turning async generators into async context managers
 # --------------------------------------------------------------------------- #
-class AsyncContextManagerWrapper(
+class AsyncContextManagerWrapper(contextlib.AbstractAsyncContextManager):
     """Wrap an async generator so it can be used with `async with`."""
 
     def __init__(self, gen: AsyncGenerator[Any, None]):
@@ -189,17 +186,18 @@ class LogContext:
     # Public API
     # ------------------------------------------------------------------ #
     @property
-    def context(self) ->
+    def context(self) -> dict[str, Any]:
         """Return the current context dict (task-local)."""
-
+        ctx = _context_var.get()
+        return ctx if ctx is not None else {}
 
     @property
-    def request_id(self) ->
+    def request_id(self) -> str | None:
         """Convenience accessor for the current request ID (if any)."""
         return self.context.get("request_id")
 
     # -- simple helpers ------------------------------------------------- #
-    def update(self, kv:
+    def update(self, kv: dict[str, Any]) -> None:
         """Merge *kv* into the current context."""
         ctx = self.context.copy()
         ctx.update(kv)
@@ -209,12 +207,12 @@ class LogContext:
         """Drop **all** contextual data."""
         _context_var.set({})
 
-    def get_copy(self) ->
+    def get_copy(self) -> dict[str, Any]:
         """Return a **copy** of the current context."""
         return self.context.copy()
 
     # -- request helpers ------------------------------------------------ #
-    def start_request(self, request_id:
+    def start_request(self, request_id: str | None = None) -> str:
         """
         Start a new *request* scope.
 
@@ -233,9 +231,7 @@ class LogContext:
     # ------------------------------------------------------------------ #
     # Async context helpers
     # ------------------------------------------------------------------ #
-    async def _context_scope_gen(
-        self, **kwargs: Any
-    ) -> AsyncGenerator[Dict[str, Any], None]:
+    async def _context_scope_gen(self, **kwargs: Any) -> AsyncGenerator[dict[str, Any], None]:
         prev_ctx = self.get_copy()
         try:
             self.update(kwargs)
@@ -243,7 +239,7 @@ class LogContext:
         finally:
             _context_var.set(prev_ctx)
 
-    def context_scope(self, **kwargs: Any) ->
+    def context_scope(self, **kwargs: Any) -> contextlib.AbstractAsyncContextManager:
         """
         Temporarily add *kwargs* to the context.
 
@@ -256,9 +252,7 @@ class LogContext:
         """
         return AsyncContextManagerWrapper(self._context_scope_gen(**kwargs))
 
-    async def _request_scope_gen(
-        self, request_id: Optional[str] = None
-    ) -> AsyncGenerator[str, None]:
+    async def _request_scope_gen(self, request_id: str | None = None) -> AsyncGenerator[str, None]:
         prev_ctx = self.get_copy()
         try:
             rid = self.start_request(request_id)
@@ -267,7 +261,7 @@ class LogContext:
         finally:
             _context_var.set(prev_ctx)
 
-    def request_scope(self, request_id:
+    def request_scope(self, request_id: str | None = None) -> contextlib.AbstractAsyncContextManager:
         """
         Manage a full request lifecycle::
 
@@ -280,6 +274,7 @@ class LogContext:
 # A convenient global instance that most code can just import and use.
 log_context = LogContext()
 
+
 # --------------------------------------------------------------------------- #
 # StructuredAdapter
 # --------------------------------------------------------------------------- #
@@ -337,10 +332,10 @@ class StructuredAdapter(logging.LoggerAdapter):
 def get_logger(name: str) -> StructuredAdapter:
     """
     Return a :class:`StructuredAdapter` wrapping ``logging.getLogger(name)``.
-
+
     Includes automatic initialization of clean shutdown behavior.
     """
     # Ensure clean shutdown behavior is initialized
     _logging_manager.initialize()
-
-    return StructuredAdapter(logging.getLogger(name), {})
+
+    return StructuredAdapter(logging.getLogger(name), {})
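The LogContext changes keep the same per-task semantics: context_scope and request_scope return async context managers that snapshot the contextvar on entry and restore it on exit, so concurrent tasks never see each other's keys. A hedged usage sketch using only names exported in __all__ (the kwargs tool and attempt are arbitrary examples, and the `as request_id` binding assumes the wrapper yields the generated id as the diff's AsyncGenerator[str, None] annotation suggests):

import asyncio

from chuk_tool_processor.logging import get_logger, log_context

logger = get_logger("chuk_tool_processor.demo")


async def handle_call() -> None:
    # request_scope() issues (or accepts) a request_id and restores the
    # previous context afterwards; context_scope() layers extra keys on top.
    async with log_context.request_scope() as request_id:
        async with log_context.context_scope(tool="search", attempt=1):
            logger.info(f"handling request {request_id}")


asyncio.run(handle_call())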