chuk-tool-processor 0.9.2__py3-none-any.whl → 0.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

@@ -77,11 +77,8 @@ class LibraryLoggingManager:
         self._initialized = False
         self._lock = threading.Lock()
 
-    def initialize(self):
+    def initialize(self) -> None:
         """Initialize clean shutdown behavior for the library."""
-        if self._initialized:
-            return
-
         with self._lock:
             if self._initialized:
                 return
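Note on this hunk: the pre-lock fast path is gone, so `initialize()` now always takes the lock and checks `_initialized` only while holding it, removing the unlocked read. A minimal sketch of the resulting pattern (class and attribute names from the hunk; the setup body is illustrative):

    import threading

    class LibraryLoggingManager:
        def __init__(self) -> None:
            self._initialized = False
            self._lock = threading.Lock()

        def initialize(self) -> None:
            """Initialize clean shutdown behavior for the library."""
            with self._lock:
                if self._initialized:  # checked only under the lock
                    return
                # ... one-time setup would happen here (illustrative) ...
                self._initialized = True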
@@ -299,7 +296,7 @@ class StructuredAdapter(logging.LoggerAdapter):
         return msg, kwargs
 
     # ----------------------- convenience wrappers ------------------------ #
-    def _forward(self, method_name: str, msg, *args, **kwargs):
+    def _forward(self, method_name: str, msg: str, *args: Any, **kwargs: Any) -> None:
         """Common helper: process + forward to `self.logger.<method_name>`."""
         msg, kwargs = self.process(msg, kwargs)
         getattr(self.logger, method_name)(msg, *args, **kwargs)
@@ -9,6 +9,7 @@ Updated to support the latest MCP transports:
 """
 
 from chuk_tool_processor.mcp.mcp_tool import MCPTool
+from chuk_tool_processor.mcp.models import MCPServerConfig, MCPTransport
 from chuk_tool_processor.mcp.register_mcp_tools import register_mcp_tools
 from chuk_tool_processor.mcp.setup_mcp_http_streamable import setup_mcp_http_streamable
 from chuk_tool_processor.mcp.setup_mcp_sse import setup_mcp_sse
@@ -23,6 +24,8 @@ __all__ = [
     "HTTPStreamableTransport",
     "StreamManager",
     "MCPTool",
+    "MCPServerConfig",
+    "MCPTransport",
     "register_mcp_tools",
     "setup_mcp_stdio",
     "setup_mcp_sse",
@@ -237,7 +237,12 @@ class MCPTool:
                 await self._record_failure()
 
                 if attempt == max_attempts - 1:
-                    return {"error": error_msg, "tool_name": self.tool_name, "available": False, "reason": "timeout"}
+                    return {
+                        "error": error_msg,
+                        "tool_name": self.tool_name,
+                        "available": False,
+                        "reason": "timeout",
+                    }
 
             except Exception as e:
                 error_str = str(e)
@@ -260,12 +265,12 @@ class MCPTool:
                 await asyncio.sleep(backoff)
                 backoff = min(backoff * self.recovery_config.backoff_multiplier, self.recovery_config.max_backoff)
 
-        # Should never reach here
+        # Should never reach here, but return error if we do
         return {
             "error": f"Tool '{self.tool_name}' failed after all attempts",
             "tool_name": self.tool_name,
             "available": False,
-            "reason": "exhausted_retries",
+            "reason": "execution_failed",
         }
 
     async def _execute_with_timeout(self, timeout: float, **kwargs: Any) -> Any:
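Both the timeout path and the exhausted-retries path now return a structured error dict rather than raising, and the fallback `reason` changed from "exhausted_retries" to "execution_failed". A hedged sketch of consuming that shape (the `execute` call is hypothetical; only the returned keys come from the hunks above):

    # Hypothetical caller; the dict keys match the hunks above.
    result = await tool.execute(query="ping")
    if isinstance(result, dict) and result.get("available") is False:
        if result["reason"] == "timeout":
            ...  # retry later or surface a timeout message
        elif result["reason"] == "execution_failed":
            ...  # the tool gave up after all attempts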
@@ -0,0 +1,87 @@
+#!/usr/bin/env python
+# chuk_tool_processor/mcp/models.py
+"""
+Pydantic models for MCP server configurations.
+"""
+
+from __future__ import annotations
+
+from enum import Enum
+from typing import Any
+
+from pydantic import BaseModel, Field, model_validator
+
+
+class MCPTransport(str, Enum):
+    """Supported MCP transport types."""
+
+    STDIO = "stdio"
+    SSE = "sse"
+    HTTP = "http"
+
+
+class MCPServerConfig(BaseModel):
+    """Unified configuration for MCP servers (all transport types)."""
+
+    name: str = Field(description="Server identifier name")
+    transport: MCPTransport = Field(default=MCPTransport.STDIO, description="Transport protocol")
+
+    # STDIO fields
+    command: str | None = Field(default=None, description="Command to execute (stdio only)")
+    args: list[str] = Field(default_factory=list, description="Command arguments (stdio only)")
+    env: dict[str, str] | None = Field(default=None, description="Environment variables (stdio only)")
+
+    # SSE/HTTP fields
+    url: str | None = Field(default=None, description="Server URL (sse/http)")
+    headers: dict[str, str] = Field(default_factory=dict, description="HTTP headers (sse/http)")
+    timeout: float = Field(default=10.0, description="Connection timeout in seconds")
+    sse_read_timeout: float = Field(default=300.0, description="SSE read timeout in seconds (sse only)")
+    api_key: str | None = Field(default=None, description="API key extracted from Authorization header")
+    session_id: str | None = Field(default=None, description="Session ID for HTTP transport")
+
+    @model_validator(mode="after")
+    def validate_transport_fields(self) -> MCPServerConfig:
+        """Validate required fields based on transport type."""
+        if self.transport == MCPTransport.STDIO:
+            if not self.command:
+                raise ValueError("command is required for stdio transport")
+        else:
+            # SSE/HTTP
+            if not self.url:
+                raise ValueError(f"url is required for {self.transport} transport")
+            # Extract API key from Authorization header if present
+            if not self.api_key and self.headers:
+                auth_header = self.headers.get("Authorization", "")
+                if "Bearer " in auth_header:
+                    self.api_key = auth_header.split("Bearer ")[-1]
+        return self
+
+    def to_dict(self) -> dict[str, Any]:
+        """Convert to dictionary for internal use."""
+        if self.transport == MCPTransport.STDIO:
+            result = {
+                "name": self.name,
+                "command": self.command,
+                "args": self.args,
+            }
+            if self.env:
+                result["env"] = self.env
+            return result
+        else:
+            # SSE/HTTP
+            result = {
+                "name": self.name,
+                "url": self.url,
+                "headers": self.headers,
+                "timeout": self.timeout,
+            }
+            if self.transport == MCPTransport.SSE:
+                result["sse_read_timeout"] = self.sse_read_timeout
+            if self.api_key:
+                result["api_key"] = self.api_key
+            if self.session_id:
+                result["session_id"] = self.session_id
+            return result
+
+
+__all__ = ["MCPServerConfig", "MCPTransport"]
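The validator normalizes transport-specific requirements at construction time, so a bad config fails fast instead of surfacing during connection. A small usage sketch based only on the model above:

    from chuk_tool_processor.mcp.models import MCPServerConfig, MCPTransport

    # stdio: "command" is mandatory
    stdio = MCPServerConfig(name="echo", command="uvx", args=["chuk-mcp-echo", "stdio"])
    print(stdio.to_dict())  # {'name': 'echo', 'command': 'uvx', 'args': [...]}

    # sse/http: "url" is mandatory, and a Bearer token is lifted into api_key
    sse = MCPServerConfig(
        name="remote",
        transport=MCPTransport.SSE,
        url="https://example.com/sse",
        headers={"Authorization": "Bearer secret-token"},
    )
    assert sse.api_key == "secret-token"

    try:
        MCPServerConfig(name="broken")  # stdio without a command
    except ValueError as exc:
        print(exc)  # command is required for stdio transport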
@@ -13,11 +13,16 @@ It:
 
 from __future__ import annotations
 
+from typing import TYPE_CHECKING, Any
+
 from chuk_tool_processor.core.processor import ToolProcessor
 from chuk_tool_processor.logging import get_logger
 from chuk_tool_processor.mcp.register_mcp_tools import register_mcp_tools
 from chuk_tool_processor.mcp.stream_manager import StreamManager
 
+if TYPE_CHECKING:
+    from chuk_tool_processor.mcp.models import MCPServerConfig
+
 logger = get_logger("chuk_tool_processor.mcp.setup_stdio")
 
 
@@ -26,8 +31,8 @@ logger = get_logger("chuk_tool_processor.mcp.setup_stdio")
 # --------------------------------------------------------------------------- #
 async def setup_mcp_stdio(  # noqa: C901 - long but just a config facade
     *,
-    config_file: str,
-    servers: list[str],
+    config_file: str | None = None,  # NOW OPTIONAL - for backward compatibility
+    servers: list[str] | list[dict[str, Any]] | list[MCPServerConfig],  # Can be server names, dicts, OR Pydantic models
     server_names: dict[int, str] | None = None,
     default_timeout: float = 10.0,
     initialization_timeout: float = 60.0,
@@ -45,17 +50,92 @@ async def setup_mcp_stdio(  # noqa: C901 - long but just a config facade
     Initialise stdio-transport MCP + a :class:`ToolProcessor`.
 
     Call with ``await`` from your async context.
+
+    Args:
+        config_file: Optional config file path (legacy mode)
+        servers: Can be:
+            - List of server names (legacy, requires config_file)
+            - List of server config dicts (new DX)
+            - List of MCPServerConfig Pydantic models (best DX)
+        server_names: Optional server name mapping
+        default_timeout: Default timeout for operations
+        initialization_timeout: Timeout for initialization
+        max_concurrency: Maximum concurrent operations
+        enable_caching: Enable result caching
+        cache_ttl: Cache time-to-live
+        enable_rate_limiting: Enable rate limiting
+        global_rate_limit: Global rate limit
+        tool_rate_limits: Per-tool rate limits
+        enable_retries: Enable retries
+        max_retries: Maximum retry attempts
+        namespace: Tool namespace
+
+    Returns:
+        Tuple of (ToolProcessor, StreamManager)
+
+    Examples:
+        # Best DX (Pydantic models):
+        from chuk_tool_processor.mcp import MCPServerConfig, MCPTransport
+
+        processor, manager = await setup_mcp_stdio(
+            servers=[
+                MCPServerConfig(
+                    name="echo",
+                    transport=MCPTransport.STDIO,
+                    command="uvx",
+                    args=["chuk-mcp-echo", "stdio"],
+                ),
+            ],
+            namespace="tools",
+        )
+
+        # New DX (dicts, no config file):
+        processor, manager = await setup_mcp_stdio(
+            servers=[
+                {"name": "echo", "command": "uvx", "args": ["chuk-mcp-echo", "stdio"]},
+            ],
+            namespace="tools",
+        )
+
+        # Legacy (with config file):
+        processor, manager = await setup_mcp_stdio(
+            config_file="mcp_config.json",
+            servers=["echo"],
+            namespace="tools",
+        )
     """
-    # 1️⃣ create & connect the stream-manager
-    # FIXED: Pass the default_timeout parameter to StreamManager.create
-    stream_manager = await StreamManager.create(
-        config_file=config_file,
-        servers=servers,
-        server_names=server_names,
-        transport_type="stdio",
-        default_timeout=default_timeout,  # 🔧 ADD THIS LINE
-        initialization_timeout=initialization_timeout,
-    )
+    # Import here to avoid circular dependency at module level
+    from chuk_tool_processor.mcp.models import MCPServerConfig as MCPServerConfigModel
+
+    # Check what format the servers are in
+    if servers and isinstance(servers[0], str):
+        # LEGACY: servers are names, config_file is required
+        if config_file is None:
+            raise ValueError("config_file is required when servers is a list of strings")
+
+        stream_manager = await StreamManager.create(
+            config_file=config_file,
+            servers=servers,  # type: ignore[arg-type]
+            server_names=server_names,
+            transport_type="stdio",
+            default_timeout=default_timeout,
+            initialization_timeout=initialization_timeout,
+        )
+    else:
+        # NEW DX: servers are config dicts or Pydantic models
+        # Convert Pydantic models to dicts if needed
+        server_dicts: list[dict[str, Any]]
+        if servers and isinstance(servers[0], MCPServerConfigModel):
+            server_dicts = [s.to_dict() for s in servers]  # type: ignore[union-attr]
+        else:
+            server_dicts = servers  # type: ignore[assignment]
+
+        stream_manager = await StreamManager.create_with_stdio(
+            servers=server_dicts,
+            server_names=server_names,
+            default_timeout=default_timeout,
+            initialization_timeout=initialization_timeout,
+        )
 
     # 2️⃣ pull the remote tool list and register each one locally
     registered = await register_mcp_tools(stream_manager, namespace=namespace)
@@ -96,6 +96,24 @@ class StreamManager:
         )
         return inst
 
+    @classmethod
+    async def create_with_stdio(
+        cls,
+        servers: list[dict[str, Any]],
+        server_names: dict[int, str] | None = None,
+        default_timeout: float = 30.0,
+        initialization_timeout: float = 60.0,
+    ) -> StreamManager:
+        """Create StreamManager with STDIO transport and timeout protection (no config file needed)."""
+        inst = cls()
+        await inst.initialize_with_stdio(
+            servers,
+            server_names,
+            default_timeout=default_timeout,
+            initialization_timeout=initialization_timeout,
+        )
+        return inst
+
     @classmethod
     async def create_with_http_streamable(
         cls,
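`create_with_stdio` mirrors the existing transport factories: construct an instance, initialize it, return it. A hedged usage sketch (server dicts in the same shape `MCPServerConfig.to_dict()` produces):

    from chuk_tool_processor.mcp.stream_manager import StreamManager

    async def connect() -> StreamManager:
        return await StreamManager.create_with_stdio(
            servers=[{"name": "echo", "command": "uvx", "args": ["chuk-mcp-echo", "stdio"]}],
            default_timeout=30.0,
        )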
@@ -373,6 +391,82 @@ class StreamManager:
             len(self.all_tools),
         )
 
+    async def initialize_with_stdio(
+        self,
+        servers: list[dict[str, Any]],
+        server_names: dict[int, str] | None = None,
+        default_timeout: float = 30.0,
+        initialization_timeout: float = 60.0,
+    ) -> None:
+        """Initialize with STDIO transport directly from server configs (no config file needed)."""
+        if self._closed:
+            raise RuntimeError("Cannot initialize a closed StreamManager")
+
+        async with self._lock:
+            self.server_names = server_names or {}
+
+            for idx, cfg in enumerate(servers):
+                name = cfg.get("name")
+                command = cfg.get("command")
+                args = cfg.get("args", [])
+                env = cfg.get("env")
+
+                if not (name and command):
+                    logger.error("Bad STDIO server config (missing name or command): %s", cfg)
+                    continue
+
+                try:
+                    # Build STDIO transport parameters
+                    transport_params = {
+                        "command": command,
+                        "args": args,
+                    }
+                    if env:
+                        transport_params["env"] = env
+
+                    logger.debug("STDIO %s: command=%s, args=%s", name, command, args)
+
+                    transport = StdioTransport(
+                        transport_params, connection_timeout=initialization_timeout, default_timeout=default_timeout
+                    )
+
+                    try:
+                        if not await asyncio.wait_for(transport.initialize(), timeout=initialization_timeout):
+                            logger.warning("Failed to init STDIO %s", name)
+                            continue
+                    except TimeoutError:
+                        logger.error("Timeout initialising STDIO %s (timeout=%ss)", name, initialization_timeout)
+                        continue
+
+                    self.transports[name] = transport
+
+                    # Ping and get tools with timeout protection
+                    status = (
+                        "Up"
+                        if await asyncio.wait_for(transport.send_ping(), timeout=self.timeout_config.operation)
+                        else "Down"
+                    )
+                    tools = await asyncio.wait_for(transport.get_tools(), timeout=self.timeout_config.operation)
+
+                    for t in tools:
+                        tname = t.get("name")
+                        if tname:
+                            self.tool_to_server_map[tname] = name
+                    self.all_tools.extend(tools)
+
+                    self.server_info.append({"id": idx, "name": name, "tools": len(tools), "status": status})
+                    logger.debug("Initialised STDIO %s - %d tool(s)", name, len(tools))
+                except TimeoutError:
+                    logger.error("Timeout initialising STDIO %s", name)
+                except Exception as exc:
+                    logger.error("Error initialising STDIO %s: %s", name, exc)
+
+        logger.debug(
+            "StreamManager ready - %d STDIO server(s), %d tool(s)",
+            len(self.transports),
+            len(self.all_tools),
+        )
+
     async def initialize_with_http_streamable(
         self,
         servers: list[dict[str, str]],
@@ -239,13 +239,13 @@ class HTTPStreamableTransport(MCPBaseTransport):
             await self._cleanup()
             if self.enable_metrics and self._metrics:
                 self._metrics.connection_errors += 1
-            return False
+            raise  # Re-raise for OAuth error detection in mcp-cli
         except Exception as e:
             logger.error("Error initializing HTTP Streamable transport: %s", e, exc_info=True)
             await self._cleanup()
             if self.enable_metrics and self._metrics:
                 self._metrics.connection_errors += 1
-            return False
+            raise  # Re-raise for OAuth error detection in mcp-cli
 
     async def _attempt_recovery(self) -> bool:
         """Attempt to recover from connection issues (NEW - like SSE resilience)."""
@@ -1,6 +1,6 @@
 # chuk_tool_processor/models/tool_export_mix_in.py
 
-from typing import Any, Protocol, runtime_checkable
+from typing import Any, Protocol, cast, runtime_checkable
 
 from pydantic import BaseModel
 
@@ -18,7 +18,7 @@ class ToolExportMixin:
     @classmethod
     def to_openai(cls) -> dict[str, Any]:
         assert hasattr(cls, "Arguments"), f"{cls.__name__} must have an Arguments attribute"
-        schema = cls.Arguments.model_json_schema()  # type: ignore[attr-defined]
+        schema = cls.Arguments.model_json_schema()  # noqa: ANN401
         return {
             "type": "function",
             "function": {
@@ -31,13 +31,13 @@ class ToolExportMixin:
     @classmethod
     def to_json_schema(cls) -> dict[str, Any]:
         assert hasattr(cls, "Arguments"), f"{cls.__name__} must have an Arguments attribute"
-        return cls.Arguments.model_json_schema()  # type: ignore[attr-defined, no-any-return]
+        return cast(dict[str, Any], cls.Arguments.model_json_schema())
 
     @classmethod
     def to_xml(cls) -> str:
         """Very small helper so existing XML-based parsers still work."""
         assert hasattr(cls, "Arguments"), f"{cls.__name__} must have an Arguments attribute"
         name = cls.__name__.removesuffix("Tool").lower()
-        params = cls.Arguments.model_json_schema()["properties"]  # type: ignore[attr-defined]
+        params = cls.Arguments.model_json_schema()["properties"]  # noqa: ANN401
         args = ", ".join(params)
         return f'<tool name="{name}" args="{{{args}}}"/>'
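The `cast` keeps `to_json_schema` type-checkable without a blanket ignore. For reference, a minimal tool exercising the mixin (the example class is illustrative; the `Arguments` inner-class convention comes from the asserts above):

    from pydantic import BaseModel

    from chuk_tool_processor.models.tool_export_mix_in import ToolExportMixin

    class SearchTool(ToolExportMixin):
        class Arguments(BaseModel):
            query: str
            limit: int = 10

    print(list(SearchTool.to_json_schema()["properties"]))  # ['query', 'limit']
    print(SearchTool.to_xml())  # <tool name="search" args="{query, limit}"/>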
@@ -7,12 +7,12 @@ Provides drop-in Prometheus metrics with a /metrics HTTP endpoint.
 from __future__ import annotations
 
 import time
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Any
 
 from chuk_tool_processor.logging import get_logger
 
 if TYPE_CHECKING:
-    from prometheus_client import Counter, Gauge, Histogram  # type: ignore[import-not-found]
+    from prometheus_client import Counter, Gauge, Histogram
 
 logger = get_logger("chuk_tool_processor.observability.metrics")
 
@@ -299,7 +299,7 @@ class MetricsTimer:
         self.start_time = time.perf_counter()
         return self
 
-    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
+    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
         self.end_time = time.perf_counter()
 
     @property
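`MetricsTimer` is a context manager, so the newly typed `__exit__` runs on every `with` block; a hedged usage sketch (constructor arguments, if any, are not visible in this diff):

    with MetricsTimer() as timer:  # assuming a no-arg constructor
        do_work()  # hypothetical workload
    print(timer.end_time - timer.start_time)  # elapsed seconds via time.perf_counter()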
@@ -6,13 +6,14 @@ Provides drop-in distributed tracing with standardized span names and attributes
 
 from __future__ import annotations
 
+from collections.abc import Generator
 from contextlib import contextmanager
 from typing import TYPE_CHECKING, Any
 
 from chuk_tool_processor.logging import get_logger
 
 if TYPE_CHECKING:
-    from opentelemetry.trace import Span, Tracer  # type: ignore[import-not-found]
+    from opentelemetry.trace import Span, Tracer
 
 logger = get_logger("chuk_tool_processor.observability.tracing")
 
@@ -34,13 +35,13 @@ def init_tracer(service_name: str = "chuk-tool-processor") -> Tracer | NoOpTracer:
     global _tracer, _tracing_enabled
 
     try:
-        from opentelemetry import trace  # type: ignore[import-not-found]
-        from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (  # type: ignore[import-not-found]
+        from opentelemetry import trace
+        from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (
             OTLPSpanExporter,
         )
-        from opentelemetry.sdk.resources import Resource  # type: ignore[import-not-found]
-        from opentelemetry.sdk.trace import TracerProvider  # type: ignore[import-not-found]
-        from opentelemetry.sdk.trace.export import BatchSpanProcessor  # type: ignore[import-not-found]
+        from opentelemetry.sdk.resources import Resource
+        from opentelemetry.sdk.trace import TracerProvider
+        from opentelemetry.sdk.trace.export import BatchSpanProcessor
 
         # Create resource with service name
         resource = Resource.create({"service.name": service_name})
@@ -89,7 +90,7 @@ def trace_tool_execution(
     tool: str,
     namespace: str | None = None,
     attributes: dict[str, Any] | None = None,
-):
+) -> Generator[None, None, None]:
     """
     Context manager for tracing tool execution.
 
@@ -135,7 +136,7 @@ def trace_cache_operation(
     tool: str,
     hit: bool | None = None,
     attributes: dict[str, Any] | None = None,
-):
+) -> Generator[None, None, None]:
     """
     Context manager for tracing cache operations.
 
@@ -179,7 +180,7 @@ def trace_retry_attempt(
     attempt: int,
     max_retries: int,
     attributes: dict[str, Any] | None = None,
-):
+) -> Generator[None, None, None]:
     """
     Context manager for tracing retry attempts.
 
@@ -220,7 +221,7 @@ def trace_circuit_breaker(
     tool: str,
     state: str,
     attributes: dict[str, Any] | None = None,
-):
+) -> Generator[None, None, None]:
     """
     Context manager for tracing circuit breaker operations.
 
@@ -259,7 +260,7 @@ def trace_rate_limit(
     tool: str,
     allowed: bool,
     attributes: dict[str, Any] | None = None,
-):
+) -> Generator[None, None, None]:
     """
     Context manager for tracing rate limiting.
 
@@ -340,6 +341,6 @@ class NoOpTracer:
     """No-op tracer when OpenTelemetry is not available."""
 
     @contextmanager
-    def start_as_current_span(self, _name: str, **_kwargs):
+    def start_as_current_span(self, _name: str, **_kwargs: Any) -> Generator[None, None, None]:
         """No-op span context manager."""
         yield None
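`Generator[None, None, None]` is the conventional return annotation for a `@contextmanager` function that yields nothing, which is what all the `trace_*` helpers and `NoOpTracer` above adopt. A self-contained illustration of the same pattern:

    from collections.abc import Generator
    from contextlib import contextmanager

    @contextmanager
    def noop_span(name: str) -> Generator[None, None, None]:
        # Matches the NoOpTracer shape: set up nothing, yield, tear down nothing.
        yield None

    with noop_span("tool.execute"):
        pass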
@@ -36,7 +36,7 @@ class ToolRegistryInterface(Protocol):
             namespace: Namespace for the tool (default: "default").
             metadata: Optional additional metadata for the tool.
         """
-        ...
+        ...  # pragma: no cover
 
     async def get_tool(self, name: str, namespace: str = "default") -> Any | None:
         """
@@ -49,7 +49,7 @@ class ToolRegistryInterface(Protocol):
         Returns:
             The tool implementation or None if not found.
         """
-        ...
+        ...  # pragma: no cover
 
     async def get_tool_strict(self, name: str, namespace: str = "default") -> Any:
         """
@@ -65,7 +65,7 @@ class ToolRegistryInterface(Protocol):
         Raises:
             ToolNotFoundError: If the tool is not found in the registry.
         """
-        ...
+        ...  # pragma: no cover
 
     async def get_metadata(self, name: str, namespace: str = "default") -> ToolMetadata | None:
         """
@@ -78,7 +78,7 @@ class ToolRegistryInterface(Protocol):
         Returns:
             ToolMetadata if found, None otherwise.
         """
-        ...
+        ...  # pragma: no cover
 
     async def list_tools(self, namespace: str | None = None) -> list[tuple[str, str]]:
         """
@@ -90,7 +90,7 @@ class ToolRegistryInterface(Protocol):
         Returns:
             List of (namespace, name) tuples.
         """
-        ...
+        ...  # pragma: no cover
 
     async def list_namespaces(self) -> list[str]:
         """
@@ -99,7 +99,7 @@ class ToolRegistryInterface(Protocol):
         Returns:
             List of namespace names.
         """
-        ...
+        ...  # pragma: no cover
 
     async def list_metadata(self, namespace: str | None = None) -> list[ToolMetadata]:
         """
@@ -113,4 +113,4 @@ class ToolRegistryInterface(Protocol):
         Returns:
             List of ToolMetadata objects.
         """
-        ...
+        ...  # pragma: no cover
@@ -5,6 +5,7 @@ Async registry provider implementations and factory functions.
 """
 import asyncio
 import os
+from typing import Any
 
 from chuk_tool_processor.registry.interface import ToolRegistryInterface
 
@@ -13,7 +14,7 @@ _REGISTRY_CACHE: dict[str, ToolRegistryInterface] = {}
 _REGISTRY_LOCKS: dict[str, asyncio.Lock] = {}
 
 
-async def get_registry(provider_type: str | None = None, **kwargs) -> ToolRegistryInterface:
+async def get_registry(provider_type: str | None = None, **kwargs: Any) -> ToolRegistryInterface:
     """
     Factory function to get a registry implementation asynchronously.
 
@@ -28,13 +28,8 @@ async def _build_openai_name_cache() -> None:
     """
    global _OPENAI_NAME_CACHE
 
-    # Fast path - cache already exists
-    if _OPENAI_NAME_CACHE is not None:
-        return
-
-    # Slow path - build the cache with proper locking
+    # Build the cache with proper locking
     async with _CACHE_LOCK:
-        # Double-check pattern: check again after acquiring the lock
         if _OPENAI_NAME_CACHE is not None:
            return
 
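As in the LibraryLoggingManager change above, the unlocked fast path is dropped: the first check on `_OPENAI_NAME_CACHE` now happens only after acquiring `_CACHE_LOCK`, so two coroutines can no longer race on a partially built cache. A minimal sketch of the resulting pattern (names from the hunk; the build step is illustrative):

    import asyncio

    _OPENAI_NAME_CACHE: dict[str, str] | None = None
    _CACHE_LOCK = asyncio.Lock()

    async def _build_openai_name_cache() -> None:
        global _OPENAI_NAME_CACHE
        # Build the cache with proper locking
        async with _CACHE_LOCK:
            if _OPENAI_NAME_CACHE is not None:  # checked only under the lock
                return
            _OPENAI_NAME_CACHE = {}  # illustrative build step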