chuk-tool-processor: chuk_tool_processor-0.4-py3-none-any.whl → chuk_tool_processor-0.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of chuk-tool-processor might be problematic. Click here for more details.

Files changed (29)
  1. chuk_tool_processor/core/processor.py +1 -1
  2. chuk_tool_processor/execution/strategies/inprocess_strategy.py +30 -9
  3. chuk_tool_processor/execution/strategies/subprocess_strategy.py +139 -97
  4. chuk_tool_processor/execution/tool_executor.py +7 -7
  5. chuk_tool_processor/execution/wrappers/caching.py +3 -3
  6. chuk_tool_processor/execution/wrappers/retry.py +163 -174
  7. chuk_tool_processor/logging/__init__.py +12 -0
  8. chuk_tool_processor/logging/context.py +110 -7
  9. chuk_tool_processor/mcp/mcp_tool.py +148 -40
  10. chuk_tool_processor/mcp/register_mcp_tools.py +3 -3
  11. chuk_tool_processor/mcp/setup_mcp_sse.py +4 -4
  12. chuk_tool_processor/mcp/setup_mcp_stdio.py +2 -2
  13. chuk_tool_processor/mcp/stream_manager.py +71 -15
  14. chuk_tool_processor/mcp/transport/base_transport.py +2 -2
  15. chuk_tool_processor/mcp/transport/sse_transport.py +7 -19
  16. chuk_tool_processor/mcp/transport/stdio_transport.py +7 -9
  17. chuk_tool_processor/models/validated_tool.py +6 -6
  18. chuk_tool_processor/plugins/discovery.py +3 -3
  19. chuk_tool_processor/plugins/parsers/base.py +1 -1
  20. chuk_tool_processor/plugins/parsers/xml_tool.py +2 -2
  21. chuk_tool_processor/registry/auto_register.py +5 -5
  22. chuk_tool_processor/registry/decorators.py +278 -64
  23. chuk_tool_processor/registry/interface.py +2 -2
  24. chuk_tool_processor/registry/providers/memory.py +2 -2
  25. chuk_tool_processor/utils/validation.py +1 -1
  26. {chuk_tool_processor-0.4.dist-info → chuk_tool_processor-0.5.dist-info}/METADATA +2 -3
  27. {chuk_tool_processor-0.4.dist-info → chuk_tool_processor-0.5.dist-info}/RECORD +29 -29
  28. {chuk_tool_processor-0.4.dist-info → chuk_tool_processor-0.5.dist-info}/WHEEL +0 -0
  29. {chuk_tool_processor-0.4.dist-info → chuk_tool_processor-0.5.dist-info}/top_level.txt +0 -0
@@ -2,36 +2,31 @@
2
2
  """
3
3
  Async-native retry wrapper for tool execution.
4
4
 
5
- This module provides a retry mechanism for tool calls that can automatically
6
- retry failed executions based on configurable criteria and backoff strategies.
5
+ Adds exponential–back-off retry logic and *deadline-aware* timeout handling so a
6
+ `timeout=` passed by callers is treated as the **total wall-clock budget** for
7
+ all attempts of a single tool call.
7
8
  """
8
9
  from __future__ import annotations
9
10
 
10
11
  import asyncio
11
- import logging
12
12
  import random
13
+ import time
13
14
  from datetime import datetime, timezone
14
- from typing import Any, Dict, List, Optional, Type, Union
15
+ from typing import Any, Dict, List, Optional, Type
15
16
 
17
+ from chuk_tool_processor.logging import get_logger
16
18
  from chuk_tool_processor.models.tool_call import ToolCall
17
19
  from chuk_tool_processor.models.tool_result import ToolResult
18
- from chuk_tool_processor.logging import get_logger
19
20
 
20
21
  logger = get_logger("chuk_tool_processor.execution.wrappers.retry")
21
22
 
22
23
 
24
+ # --------------------------------------------------------------------------- #
25
+ # Retry configuration
26
+ # --------------------------------------------------------------------------- #
23
27
  class RetryConfig:
24
- """
25
- Configuration for retry behavior.
26
-
27
- Attributes:
28
- max_retries: Maximum number of retry attempts
29
- base_delay: Base delay between retries in seconds
30
- max_delay: Maximum delay between retries in seconds
31
- jitter: Whether to add random jitter to delays
32
- retry_on_exceptions: List of exception types to retry on
33
- retry_on_error_substrings: List of error message substrings to retry on
34
- """
28
+ """Configuration object that decides *whether* and *when* to retry."""
29
+
35
30
  def __init__(
36
31
  self,
37
32
  max_retries: int = 3,
@@ -39,248 +34,242 @@ class RetryConfig:
39
34
  max_delay: float = 60.0,
40
35
  jitter: bool = True,
41
36
  retry_on_exceptions: Optional[List[Type[Exception]]] = None,
42
- retry_on_error_substrings: Optional[List[str]] = None
37
+ retry_on_error_substrings: Optional[List[str]] = None,
43
38
  ):
39
+ if max_retries < 0:
40
+ raise ValueError("max_retries cannot be negative")
44
41
  self.max_retries = max_retries
45
42
  self.base_delay = base_delay
46
43
  self.max_delay = max_delay
47
44
  self.jitter = jitter
48
45
  self.retry_on_exceptions = retry_on_exceptions or []
49
46
  self.retry_on_error_substrings = retry_on_error_substrings or []
50
-
51
- def should_retry(self, attempt: int, error: Optional[Exception] = None, error_str: Optional[str] = None) -> bool:
52
- """
53
- Determine if a retry should be attempted.
54
-
55
- Args:
56
- attempt: Current attempt number (0-based)
57
- error: Exception that caused the failure, if any
58
- error_str: Error message string, if any
59
-
60
- Returns:
61
- True if a retry should be attempted, False otherwise
62
- """
47
+
48
+ # --------------------------------------------------------------------- #
49
+ # Decision helpers
50
+ # --------------------------------------------------------------------- #
51
+ def should_retry( # noqa: D401 (imperative mood is fine)
52
+ self,
53
+ attempt: int,
54
+ *,
55
+ error: Optional[Exception] = None,
56
+ error_str: Optional[str] = None,
57
+ ) -> bool:
58
+ """Return *True* iff another retry is allowed for this attempt."""
63
59
  if attempt >= self.max_retries:
64
60
  return False
61
+
62
+ # Nothing specified → always retry until max_retries reached
65
63
  if not self.retry_on_exceptions and not self.retry_on_error_substrings:
66
64
  return True
65
+
67
66
  if error is not None and any(isinstance(error, exc) for exc in self.retry_on_exceptions):
68
67
  return True
68
+
69
69
  if error_str and any(substr in error_str for substr in self.retry_on_error_substrings):
70
70
  return True
71
+
71
72
  return False
72
-
73
+
74
+ # --------------------------------------------------------------------- #
75
+ # Back-off
76
+ # --------------------------------------------------------------------- #
73
77
  def get_delay(self, attempt: int) -> float:
74
- """
75
- Calculate the delay for the current attempt with exponential backoff.
76
-
77
- Args:
78
- attempt: Current attempt number (0-based)
79
-
80
- Returns:
81
- Delay in seconds
82
- """
78
+ """Exponential back-off delay for *attempt* (0-based)."""
83
79
  delay = min(self.base_delay * (2 ** attempt), self.max_delay)
84
80
  if self.jitter:
85
- delay *= (0.5 + random.random())
81
+ delay *= 0.5 + random.random() # jitter in [0.5, 1.5)
86
82
  return delay
87
83
 
88
84
 
85
+ # --------------------------------------------------------------------------- #
86
+ # Retryable executor
87
+ # --------------------------------------------------------------------------- #
89
88
  class RetryableToolExecutor:
90
89
  """
91
- Wrapper for a tool executor that applies retry logic.
92
-
93
- This executor wraps another executor and automatically retries failed
94
- tool calls based on configured retry policies.
90
+ Wraps another executor and re-invokes it according to a :class:`RetryConfig`.
95
91
  """
92
+
96
93
  def __init__(
97
94
  self,
98
95
  executor: Any,
96
+ *,
99
97
  default_config: Optional[RetryConfig] = None,
100
- tool_configs: Optional[Dict[str, RetryConfig]] = None
98
+ tool_configs: Optional[Dict[str, RetryConfig]] = None,
101
99
  ):
102
- """
103
- Initialize the retryable executor.
104
-
105
- Args:
106
- executor: The underlying executor to wrap
107
- default_config: Default retry configuration for all tools
108
- tool_configs: Tool-specific retry configurations
109
- """
110
100
  self.executor = executor
111
101
  self.default_config = default_config or RetryConfig()
112
102
  self.tool_configs = tool_configs or {}
113
-
114
- def _get_config(self, tool: str) -> RetryConfig:
115
- """Get the retry configuration for a specific tool."""
103
+
104
+ # --------------------------------------------------------------------- #
105
+ # Public helpers
106
+ # --------------------------------------------------------------------- #
107
+ def _config_for(self, tool: str) -> RetryConfig:
116
108
  return self.tool_configs.get(tool, self.default_config)
117
-
109
+
118
110
  async def execute(
119
111
  self,
120
112
  calls: List[ToolCall],
113
+ *,
121
114
  timeout: Optional[float] = None,
122
- use_cache: bool = True
115
+ use_cache: bool = True,
123
116
  ) -> List[ToolResult]:
124
- """
125
- Execute tool calls with retry logic.
126
-
127
- Args:
128
- calls: List of tool calls to execute
129
- timeout: Optional timeout for each execution
130
- use_cache: Whether to use cached results (passed to underlying executor)
131
-
132
- Returns:
133
- List of tool results
134
- """
135
- # Handle empty calls list
136
117
  if not calls:
137
118
  return []
138
-
139
- # Execute each call with retries
140
- results: List[ToolResult] = []
119
+
120
+ out: List[ToolResult] = []
141
121
  for call in calls:
142
- config = self._get_config(call.tool)
143
- result = await self._execute_with_retry(call, config, timeout, use_cache)
144
- results.append(result)
145
- return results
146
-
147
- async def _execute_with_retry(
122
+ cfg = self._config_for(call.tool)
123
+ out.append(await self._execute_single(call, cfg, timeout, use_cache))
124
+ return out
125
+
126
+ # --------------------------------------------------------------------- #
127
+ # Core retry loop (per call)
128
+ # --------------------------------------------------------------------- #
129
+ async def _execute_single(
148
130
  self,
149
131
  call: ToolCall,
150
- config: RetryConfig,
132
+ cfg: RetryConfig,
151
133
  timeout: Optional[float],
152
- use_cache: bool
134
+ use_cache: bool,
153
135
  ) -> ToolResult:
154
- """
155
- Execute a single tool call with retries.
156
-
157
- Args:
158
- call: Tool call to execute
159
- config: Retry configuration to use
160
- timeout: Optional timeout for execution
161
- use_cache: Whether to use cached results
162
-
163
- Returns:
164
- Tool result after retries
165
- """
166
136
  attempt = 0
167
137
  last_error: Optional[str] = None
168
138
  pid = 0
169
139
  machine = "unknown"
170
-
140
+
141
+ # ---------------------------------------------------------------- #
142
+ # Deadline budget (wall-clock)
143
+ # ---------------------------------------------------------------- #
144
+ deadline = None
145
+ if timeout is not None:
146
+ deadline = time.monotonic() + timeout
147
+
171
148
  while True:
149
+ # ---------------------------------------------------------------- #
150
+ # Check whether we have any time left *before* trying the call
151
+ # ---------------------------------------------------------------- #
152
+ if deadline is not None:
153
+ remaining = deadline - time.monotonic()
154
+ if remaining <= 0:
155
+ return ToolResult(
156
+ tool=call.tool,
157
+ result=None,
158
+ error=f"Timeout after {timeout}s",
159
+ start_time=datetime.now(timezone.utc),
160
+ end_time=datetime.now(timezone.utc),
161
+ machine=machine,
162
+ pid=pid,
163
+ attempts=attempt,
164
+ )
165
+ else:
166
+ remaining = None # unlimited
167
+
168
+ # ---------------------------------------------------------------- #
169
+ # Execute one attempt
170
+ # ---------------------------------------------------------------- #
172
171
  start_time = datetime.now(timezone.utc)
173
-
174
172
  try:
175
- # Pass the use_cache parameter if the executor supports it
176
- executor_kwargs = {"timeout": timeout}
173
+ kwargs = {"timeout": remaining} if remaining is not None else {}
177
174
  if hasattr(self.executor, "use_cache"):
178
- executor_kwargs["use_cache"] = use_cache
179
-
180
- # Execute call
181
- tool_results = await self.executor.execute([call], **executor_kwargs)
182
- result = tool_results[0]
175
+ kwargs["use_cache"] = use_cache
176
+
177
+ result = (await self.executor.execute([call], **kwargs))[0]
183
178
  pid = result.pid
184
179
  machine = result.machine
185
-
186
- # Check for error in result
187
- if result.error:
188
- last_error = result.error
189
- if config.should_retry(attempt, error_str=result.error):
190
- logger.debug(
191
- f"Retrying tool {call.tool} after error: {result.error} (attempt {attempt + 1}/{config.max_retries})"
192
- )
193
- await asyncio.sleep(config.get_delay(attempt))
194
- attempt += 1
195
- continue
196
-
197
- # No retry: if any retries happened, wrap final error
198
- if attempt > 0:
199
- end_time = datetime.now(timezone.utc)
200
- final = ToolResult(
201
- tool=call.tool,
202
- result=None,
203
- error=f"Max retries reached ({config.max_retries}): {last_error}",
204
- start_time=start_time,
205
- end_time=end_time,
206
- machine=machine,
207
- pid=pid
208
- )
209
- # Attach attempts
210
- final.attempts = attempt + 1 # Include the original attempt
211
- return final
212
-
213
- # No retries occurred, return the original failure
214
- result.attempts = 1
180
+
181
+ # Success?
182
+ if not result.error:
183
+ result.attempts = attempt + 1
215
184
  return result
216
-
217
- # Success: attach attempts and return
218
- result.attempts = attempt + 1 # Include the original attempt
185
+
186
+ # Error: decide on retry
187
+ last_error = result.error
188
+ if cfg.should_retry(attempt, error_str=result.error):
189
+ delay = cfg.get_delay(attempt)
190
+ # never overshoot the deadline
191
+ if deadline is not None:
192
+ delay = min(delay, max(deadline - time.monotonic(), 0))
193
+ if delay:
194
+ await asyncio.sleep(delay)
195
+ attempt += 1
196
+ continue
197
+
198
+ # No more retries wanted
199
+ result.error = self._wrap_error(last_error, attempt, cfg)
200
+ result.attempts = attempt + 1
219
201
  return result
220
-
221
- except Exception as e:
222
- err_str = str(e)
202
+
203
+ # ---------------------------------------------------------------- #
204
+ # Exception path
205
+ # ---------------------------------------------------------------- #
206
+ except Exception as exc: # noqa: BLE001
207
+ err_str = str(exc)
223
208
  last_error = err_str
224
-
225
- if config.should_retry(attempt, error=e):
226
- logger.info(
227
- f"Retrying tool {call.tool} after exception: {err_str} (attempt {attempt + 1}/{config.max_retries})"
228
- )
229
- await asyncio.sleep(config.get_delay(attempt))
209
+ if cfg.should_retry(attempt, error=exc):
210
+ delay = cfg.get_delay(attempt)
211
+ if deadline is not None:
212
+ delay = min(delay, max(deadline - time.monotonic(), 0))
213
+ if delay:
214
+ await asyncio.sleep(delay)
230
215
  attempt += 1
231
216
  continue
232
-
233
- # No more retries: return error result
217
+
234
218
  end_time = datetime.now(timezone.utc)
235
- final_exc = ToolResult(
219
+ return ToolResult(
236
220
  tool=call.tool,
237
221
  result=None,
238
- error=err_str,
222
+ error=self._wrap_error(err_str, attempt, cfg),
239
223
  start_time=start_time,
240
224
  end_time=end_time,
241
225
  machine=machine,
242
- pid=pid
226
+ pid=pid,
227
+ attempts=attempt + 1,
243
228
  )
244
- final_exc.attempts = attempt + 1 # Include the original attempt
245
- return final_exc
246
229
 
230
+ # --------------------------------------------------------------------- #
231
+ # Helpers
232
+ # --------------------------------------------------------------------- #
233
+ @staticmethod
234
+ def _wrap_error(err: str, attempt: int, cfg: RetryConfig) -> str:
235
+ if attempt >= cfg.max_retries and attempt > 0:
236
+ return f"Max retries reached ({cfg.max_retries}): {err}"
237
+ return err
247
238
 
239
+
240
+ # --------------------------------------------------------------------------- #
241
+ # Decorator helper
242
+ # --------------------------------------------------------------------------- #
248
243
  def retryable(
244
+ *,
249
245
  max_retries: int = 3,
250
246
  base_delay: float = 1.0,
251
247
  max_delay: float = 60.0,
252
248
  jitter: bool = True,
253
249
  retry_on_exceptions: Optional[List[Type[Exception]]] = None,
254
- retry_on_error_substrings: Optional[List[str]] = None
250
+ retry_on_error_substrings: Optional[List[str]] = None,
255
251
  ):
256
252
  """
257
- Decorator for tool classes to configure retry behavior.
258
-
259
- Example:
260
- @retryable(max_retries=5, base_delay=2.0)
261
- class MyTool:
262
- async def execute(self, x: int, y: int) -> int:
263
- return x + y
264
-
265
- Args:
266
- max_retries: Maximum number of retry attempts
267
- base_delay: Base delay between retries in seconds
268
- max_delay: Maximum delay between retries in seconds
269
- jitter: Whether to add random jitter to delays
270
- retry_on_exceptions: List of exception types to retry on
271
- retry_on_error_substrings: List of error message substrings to retry on
272
-
273
- Returns:
274
- Decorated class with retry configuration
253
+ Class decorator that attaches a :class:`RetryConfig` to a *tool* class.
254
+
255
+ Example
256
+ -------
257
+ ```python
258
+ @retryable(max_retries=5, base_delay=0.5)
259
+ class MyTool:
260
+ ...
261
+ ```
275
262
  """
276
- def decorator(cls):
263
+
264
+ def _decorator(cls):
277
265
  cls._retry_config = RetryConfig(
278
266
  max_retries=max_retries,
279
267
  base_delay=base_delay,
280
268
  max_delay=max_delay,
281
269
  jitter=jitter,
282
270
  retry_on_exceptions=retry_on_exceptions,
283
- retry_on_error_substrings=retry_on_error_substrings
271
+ retry_on_error_substrings=retry_on_error_substrings,
284
272
  )
285
273
  return cls
286
- return decorator
274
+
275
+ return _decorator
@@ -16,6 +16,18 @@ from __future__ import annotations
16
16
  import logging
17
17
  import sys
18
18
 
19
+ # Auto-initialize shutdown error suppression when logging package is imported
20
+ def _initialize_shutdown_fixes():
21
+ """Initialize shutdown error suppression when the package is imported."""
22
+ try:
23
+ from .context import _setup_shutdown_error_suppression
24
+ _setup_shutdown_error_suppression()
25
+ except ImportError:
26
+ pass
27
+
28
+ # Initialize when package is imported
29
+ _initialize_shutdown_fixes()
30
+
19
31
  # Import internal modules in correct order to avoid circular imports
20
32
  # First, formatter has no internal dependencies
21
33
  from .formatter import StructuredFormatter
@@ -4,12 +4,12 @@ Async-safe context management for structured logging.
4
4
 
5
5
  This module provides:
6
6
 
7
- * **LogContext** an `asyncio`-aware container that keeps a per-task dict of
7
+ * **LogContext** - an `asyncio`-aware container that keeps a per-task dict of
8
8
  contextual data (request IDs, span IDs, arbitrary metadata, …).
9
- * **log_context** a global instance of `LogContext` for convenience.
10
- * **StructuredAdapter** a `logging.LoggerAdapter` that injects the current
9
+ * **log_context** - a global instance of `LogContext` for convenience.
10
+ * **StructuredAdapter** - a `logging.LoggerAdapter` that injects the current
11
11
  `log_context.context` into every log record.
12
- * **get_logger** helper that returns a configured `StructuredAdapter`.
12
+ * **get_logger** - helper that returns a configured `StructuredAdapter`.
13
13
  """
14
14
 
15
15
  from __future__ import annotations
@@ -18,6 +18,9 @@ import asyncio
18
18
  import contextvars
19
19
  import logging
20
20
  import uuid
21
+ import warnings
22
+ import threading
23
+ import atexit
21
24
  from typing import (
22
25
  Any,
23
26
  AsyncContextManager,
@@ -28,6 +31,101 @@ from typing import (
28
31
 
29
32
  __all__ = ["LogContext", "log_context", "StructuredAdapter", "get_logger"]
30
33
 
34
+ # --------------------------------------------------------------------------- #
35
+ # Production-quality shutdown error handling
36
+ # --------------------------------------------------------------------------- #
37
+ class LibraryShutdownFilter(logging.Filter):
38
+ """
39
+ Production filter for suppressing known harmless shutdown messages.
40
+
41
+ This filter ensures clean library shutdown by suppressing specific
42
+ error messages that occur during normal asyncio/anyio cleanup and
43
+ do not indicate actual problems.
44
+ """
45
+
46
+ # Known harmless shutdown patterns
47
+ SUPPRESSED_PATTERNS = [
48
+ # Primary anyio error that this fixes
49
+ ("ERROR", "Task error during shutdown", "Attempted to exit cancel scope in a different task"),
50
+ # Related asyncio/anyio shutdown messages
51
+ ("WARNING", "cancel scope in a different task"),
52
+ ("ERROR", "cancel scope in a different task"),
53
+ ("WARNING", "attempted to exit cancel scope"),
54
+ ("ERROR", "attempted to exit cancel scope"),
55
+ ("WARNING", "task was destroyed but it is pending"),
56
+ ("ERROR", "event loop is closed"),
57
+ ]
58
+
59
+ def filter(self, record: logging.LogRecord) -> bool:
60
+ """Filter out known harmless shutdown messages."""
61
+ message = record.getMessage().lower()
62
+ level = record.levelname
63
+
64
+ for pattern_level, *pattern_phrases in self.SUPPRESSED_PATTERNS:
65
+ if level == pattern_level and all(phrase.lower() in message for phrase in pattern_phrases):
66
+ return False
67
+
68
+ return True
69
+
70
+ class LibraryLoggingManager:
71
+ """
72
+ Clean manager for library-wide logging concerns.
73
+
74
+ Handles initialization and configuration of logging behavior
75
+ in a centralized, maintainable way.
76
+ """
77
+
78
+ def __init__(self):
79
+ self._initialized = False
80
+ self._lock = threading.Lock()
81
+
82
+ def initialize(self):
83
+ """Initialize clean shutdown behavior for the library."""
84
+ if self._initialized:
85
+ return
86
+
87
+ with self._lock:
88
+ if self._initialized:
89
+ return
90
+
91
+ self._setup_shutdown_handling()
92
+ self._setup_warning_filters()
93
+ self._initialized = True
94
+
95
+ def _setup_shutdown_handling(self):
96
+ """Set up clean shutdown message handling."""
97
+ root_logger = logging.getLogger()
98
+
99
+ # Check if our filter is already present
100
+ for existing_filter in root_logger.filters:
101
+ if isinstance(existing_filter, LibraryShutdownFilter):
102
+ return
103
+
104
+ # Add our production-quality filter
105
+ root_logger.addFilter(LibraryShutdownFilter())
106
+
107
+ def _setup_warning_filters(self):
108
+ """Set up Python warnings filters for clean shutdown."""
109
+ # Suppress specific asyncio/anyio warnings during shutdown
110
+ warning_patterns = [
111
+ ".*Attempted to exit cancel scope in a different task.*",
112
+ ".*coroutine was never awaited.*",
113
+ ".*Task was destroyed but it is pending.*",
114
+ ]
115
+
116
+ for pattern in warning_patterns:
117
+ warnings.filterwarnings("ignore", message=pattern, category=RuntimeWarning)
118
+ warnings.filterwarnings("ignore", message=pattern, category=ResourceWarning)
119
+
120
+ # Global manager instance
121
+ _logging_manager = LibraryLoggingManager()
122
+
123
+ # Initialize on module import
124
+ _logging_manager.initialize()
125
+
126
+ # Clean shutdown registration
127
+ atexit.register(lambda: None)
128
+
31
129
  # --------------------------------------------------------------------------- #
32
130
  # Per-task context storage
33
131
  # --------------------------------------------------------------------------- #
@@ -75,7 +173,7 @@ class LogContext:
75
173
  Async-safe context container.
76
174
 
77
175
  Holds a mutable dict that is *local* to the current asyncio task, so
78
- concurrent coroutines dont interfere with each other.
176
+ concurrent coroutines don't interfere with each other.
79
177
  """
80
178
 
81
179
  # ------------------------------------------------------------------ #
@@ -196,7 +294,7 @@ class StructuredAdapter(logging.LoggerAdapter):
196
294
  """
197
295
 
198
296
  # --------------------------- core hook -------------------------------- #
199
- def process(self, msg, kwargs): # noqa: D401 keep signature from base
297
+ def process(self, msg, kwargs): # noqa: D401 - keep signature from base
200
298
  kwargs = kwargs or {}
201
299
  extra = kwargs.get("extra", {}).copy()
202
300
  ctx = log_context.context
@@ -239,5 +337,10 @@ class StructuredAdapter(logging.LoggerAdapter):
239
337
  def get_logger(name: str) -> StructuredAdapter:
240
338
  """
241
339
  Return a :class:`StructuredAdapter` wrapping ``logging.getLogger(name)``.
340
+
341
+ Includes automatic initialization of clean shutdown behavior.
242
342
  """
243
- return StructuredAdapter(logging.getLogger(name), {})
343
+ # Ensure clean shutdown behavior is initialized
344
+ _logging_manager.initialize()
345
+
346
+ return StructuredAdapter(logging.getLogger(name), {})