chuk-tool-processor 0.1.0__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (32)
  1. chuk_tool_processor/core/processor.py +1 -1
  2. chuk_tool_processor/execution/strategies/inprocess_strategy.py +110 -148
  3. chuk_tool_processor/execution/strategies/subprocess_strategy.py +1 -1
  4. chuk_tool_processor/logging/__init__.py +35 -0
  5. chuk_tool_processor/logging/context.py +47 -0
  6. chuk_tool_processor/logging/formatter.py +55 -0
  7. chuk_tool_processor/logging/helpers.py +112 -0
  8. chuk_tool_processor/logging/metrics.py +59 -0
  9. chuk_tool_processor/models/execution_strategy.py +1 -1
  10. chuk_tool_processor/models/tool_export_mixin.py +29 -0
  11. chuk_tool_processor/models/validated_tool.py +155 -0
  12. chuk_tool_processor/plugins/discovery.py +105 -172
  13. chuk_tool_processor/plugins/parsers/__init__.py +1 -1
  14. chuk_tool_processor/plugins/parsers/base.py +18 -0
  15. chuk_tool_processor/plugins/parsers/function_call_tool_plugin.py +81 -0
  16. chuk_tool_processor/plugins/parsers/json_tool_plugin.py +38 -0
  17. chuk_tool_processor/plugins/parsers/openai_tool_plugin.py +76 -0
  18. chuk_tool_processor/plugins/parsers/xml_tool.py +28 -24
  19. chuk_tool_processor/registry/__init__.py +11 -10
  20. chuk_tool_processor/registry/auto_register.py +125 -0
  21. chuk_tool_processor/registry/provider.py +84 -29
  22. chuk_tool_processor/registry/providers/memory.py +77 -112
  23. chuk_tool_processor/registry/tool_export.py +76 -0
  24. chuk_tool_processor/utils/validation.py +106 -177
  25. {chuk_tool_processor-0.1.0.dist-info → chuk_tool_processor-0.1.1.dist-info}/METADATA +5 -2
  26. chuk_tool_processor-0.1.1.dist-info/RECORD +47 -0
  27. chuk_tool_processor/plugins/parsers/function_call_tool.py +0 -105
  28. chuk_tool_processor/plugins/parsers/json_tool.py +0 -17
  29. chuk_tool_processor/utils/logging.py +0 -260
  30. chuk_tool_processor-0.1.0.dist-info/RECORD +0 -37
  31. {chuk_tool_processor-0.1.0.dist-info → chuk_tool_processor-0.1.1.dist-info}/WHEEL +0 -0
  32. {chuk_tool_processor-0.1.0.dist-info → chuk_tool_processor-0.1.1.dist-info}/top_level.txt +0 -0
chuk_tool_processor/core/processor.py
@@ -13,7 +13,7 @@ from chuk_tool_processor.execution.wrappers.caching import CacheInterface, InMem
  from chuk_tool_processor.execution.wrappers.rate_limiting import RateLimiter, RateLimitedToolExecutor
  from chuk_tool_processor.execution.wrappers.retry import RetryConfig, RetryableToolExecutor
  from chuk_tool_processor.plugins.discovery import plugin_registry, discover_default_plugins
- from chuk_tool_processor.utils.logging import get_logger, log_context_span, request_logging, log_tool_call, metrics
+ from chuk_tool_processor.logging import get_logger, log_context_span, request_logging, log_tool_call, metrics


  class ToolProcessor:
chuk_tool_processor/execution/strategies/inprocess_strategy.py
@@ -1,206 +1,168 @@
- # chuk_tool_processor/execution/inprocess_strategy.py
+ """
+ In-process execution strategy with sync/async support.
+
+ This version prefers the public `execute()` wrapper (with validation and
+ defaults) over the private `_execute` implementation, fixing missing-argument
+ errors for `ValidatedTool` subclasses.
+ """
+
+ from __future__ import annotations
+
  import asyncio
+ import inspect
  import os
  from datetime import datetime, timezone
- from typing import List, Optional, Dict, Any
+ from typing import Any, List, Optional

- # imports
+ from chuk_tool_processor.core.exceptions import ToolExecutionError
+ from chuk_tool_processor.models.execution_strategy import ExecutionStrategy
  from chuk_tool_processor.models.tool_call import ToolCall
  from chuk_tool_processor.models.tool_result import ToolResult
- from chuk_tool_processor.models.execution_strategy import ExecutionStrategy
  from chuk_tool_processor.registry.interface import ToolRegistryInterface
- from chuk_tool_processor.core.exceptions import ToolNotFoundError, ToolTimeoutError, ToolExecutionError
- from chuk_tool_processor.utils.logging import get_logger
+ from chuk_tool_processor.logging import get_logger

  logger = get_logger("chuk_tool_processor.execution.inprocess_strategy")

+
  class InProcessStrategy(ExecutionStrategy):
-     """
-     In-process execution strategy with concurrent execution support.
-     """
+     """Run tools inside the current interpreter, concurrently."""
+
      def __init__(
-         self,
-         registry: ToolRegistryInterface,
-         default_timeout: Optional[float] = None,
-         max_concurrency: Optional[int] = None
-     ):
-         """
-         Initialize the strategy.
-
-         Args:
-             registry: Tool registry to look up tools.
-             default_timeout: Default timeout for tool executions.
-             max_concurrency: Maximum number of concurrent tool executions (default: None = unlimited).
-         """
+         self,
+         registry: ToolRegistryInterface,
+         default_timeout: float | None = None,
+         max_concurrency: int | None = None,
+     ) -> None:
          self.registry = registry
          self.default_timeout = default_timeout
-         self.max_concurrency = max_concurrency
-         self._semaphore = asyncio.Semaphore(max_concurrency) if max_concurrency else None
+         self._sem = asyncio.Semaphore(max_concurrency) if max_concurrency else None

+     # ------------------------------------------------------------------ #
+     # public API
+     # ------------------------------------------------------------------ #
      async def run(
          self,
          calls: List[ToolCall],
-         timeout: Optional[float] = None
+         timeout: float | None = None,
      ) -> List[ToolResult]:
-         """
-         Execute tool calls concurrently with timeout.
-
-         Args:
-             calls: List of tool calls to execute.
-             timeout: Optional timeout that overrides the default.
-
-         Returns:
-             List of tool results in the same order as the calls.
-         """
-         # Create tasks for each call
-         tasks = []
-         for call in calls:
-             task = self._execute_single_call(call, timeout if timeout is not None else self.default_timeout)
-             tasks.append(task)
-
-         # Run all tasks concurrently and gather results
-         results = await asyncio.gather(*tasks)
-         return results
+         tasks = [
+             self._execute_single_call(call, timeout or self.default_timeout)
+             for call in calls
+         ]
+         return await asyncio.gather(*tasks)

+     # ------------------------------------------------------------------ #
+     # helpers
+     # ------------------------------------------------------------------ #
      async def _execute_single_call(
-         self,
-         call: ToolCall,
-         timeout: Optional[float]
+         self,
+         call: ToolCall,
+         timeout: float | None,
      ) -> ToolResult:
-         """
-         Execute a single tool call with timeout.
-
-         Args:
-             call: Tool call to execute.
-             timeout: Optional timeout in seconds.
-
-         Returns:
-             Tool result with execution metadata.
-         """
-         # Get execution metadata
          pid = os.getpid()
          machine = os.uname().nodename
-         start_time = datetime.now(timezone.utc)
-
-         # Look up the tool
-         tool_impl = self.registry.get_tool(call.tool)
-         if not tool_impl:
-             end_time = datetime.now(timezone.utc)
+         start = datetime.now(timezone.utc)
+
+         impl = self.registry.get_tool(call.tool)
+         if impl is None:
              return ToolResult(
                  tool=call.tool,
                  result=None,
-                 error="Tool not found",  # Keep this message exactly as "Tool not found" for test compatibility
-                 start_time=start_time,
-                 end_time=end_time,
+                 error="Tool not found",
+                 start_time=start,
+                 end_time=datetime.now(timezone.utc),
                  machine=machine,
-                 pid=pid
+                 pid=pid,
              )
-
-         # Execute with concurrency control if needed
+
          try:
-             if self._semaphore:
-                 async with self._semaphore:
-                     return await self._run_with_timeout(tool_impl, call, timeout, start_time, machine, pid)
-             else:
-                 return await self._run_with_timeout(tool_impl, call, timeout, start_time, machine, pid)
-         except Exception as e:
-             # Catch any uncaught exceptions
-             end_time = datetime.now(timezone.utc)
+             run = self._run_with_timeout
+             if self._sem is None:
+                 return await run(impl, call, timeout, start, machine, pid)
+             async with self._sem:
+                 return await run(impl, call, timeout, start, machine, pid)
+         except Exception as exc:  # pragma: no cover – safety net
+             logger.exception("Unexpected error while executing %s", call.tool)
              return ToolResult(
                  tool=call.tool,
                  result=None,
-                 error=f"Unexpected error: {str(e)}",
-                 start_time=start_time,
-                 end_time=end_time,
+                 error=f"Unexpected error: {exc}",
+                 start_time=start,
+                 end_time=datetime.now(timezone.utc),
                  machine=machine,
-                 pid=pid
+                 pid=pid,
              )

+     # ------------------------------------------------------------------ #
+     # core execution with timeout
+     # ------------------------------------------------------------------ #
      async def _run_with_timeout(
          self,
-         tool_impl: Any,
+         impl: Any,
          call: ToolCall,
-         timeout: Optional[float],
-         start_time: datetime,
+         timeout: float | None,
+         start: datetime,
          machine: str,
-         pid: int
+         pid: int,
      ) -> ToolResult:
-         """
-         Execute a tool with timeout handling.
-         """
-         try:
-             # Determine if we need to instantiate the tool
-             # If tool_impl is a class (not an instance), instantiate it
-             if isinstance(tool_impl, type):
-                 tool_instance = tool_impl()
-             else:
-                 tool_instance = tool_impl
-
-             # Get the tool metadata to check if it's async
-             metadata = self.registry.get_metadata(call.tool) if hasattr(self.registry, "get_metadata") else None
-             is_async = metadata.is_async if metadata else asyncio.iscoroutinefunction(tool_instance.execute)
-
-             # Call the tool implementation
+         tool = impl() if isinstance(impl, type) else impl
+
+         # ------------------------------------------------------------------
+         # Entry-point selection order:
+         # 1. `_aexecute` (async special case)
+         # 2. `execute` (public wrapper WITH validation & defaults)
+         # 3. `_execute` (fallback / legacy)
+         # ------------------------------------------------------------------
+         if hasattr(tool, "_aexecute") and inspect.iscoroutinefunction(tool._aexecute):
+             fn = tool._aexecute
+             is_async = True
+         elif hasattr(tool, "execute"):
+             fn = tool.execute
+             is_async = inspect.iscoroutinefunction(fn)
+         elif hasattr(tool, "_execute"):
+             fn = tool._execute
+             is_async = inspect.iscoroutinefunction(fn)
+         else:
+             raise ToolExecutionError(
+                 f"Tool '{call.tool}' must implement _execute, execute or _aexecute"
+             )
+
+         async def _invoke():
              if is_async:
-                 # Direct async call
-                 if timeout:
-                     result_value = await asyncio.wait_for(
-                         tool_instance.execute(**call.arguments),
-                         timeout
-                     )
-                 else:
-                     result_value = await tool_instance.execute(**call.arguments)
-             else:
-                 # Run sync function in executor
-                 loop = asyncio.get_running_loop()
-                 if timeout:
-                     result_value = await asyncio.wait_for(
-                         loop.run_in_executor(
-                             None,
-                             lambda: tool_instance.execute(**call.arguments)
-                         ),
-                         timeout
-                     )
-                 else:
-                     result_value = await loop.run_in_executor(
-                         None,
-                         lambda: tool_instance.execute(**call.arguments)
-                     )
-
-             # Create successful result
-             end_time = datetime.now(timezone.utc)
+                 return await fn(**call.arguments)
+             loop = asyncio.get_running_loop()
+             return await loop.run_in_executor(None, lambda: fn(**call.arguments))
+
+         try:
+             result_val = (
+                 await asyncio.wait_for(_invoke(), timeout) if timeout else await _invoke()
+             )
              return ToolResult(
                  tool=call.tool,
-                 result=result_value,
+                 result=result_val,
                  error=None,
-                 start_time=start_time,
-                 end_time=end_time,
+                 start_time=start,
+                 end_time=datetime.now(timezone.utc),
                  machine=machine,
-                 pid=pid
+                 pid=pid,
              )
-
          except asyncio.TimeoutError:
-             # Handle timeout
-             end_time = datetime.now(timezone.utc)
              return ToolResult(
                  tool=call.tool,
                  result=None,
                  error=f"Timeout after {timeout}s",
-                 start_time=start_time,
-                 end_time=end_time,
+                 start_time=start,
+                 end_time=datetime.now(timezone.utc),
                  machine=machine,
-                 pid=pid
+                 pid=pid,
              )
-
-         except Exception as e:
-             # Handle execution error
-             end_time = datetime.now(timezone.utc)
+         except Exception as exc:
              return ToolResult(
                  tool=call.tool,
                  result=None,
-                 error=str(e),
-                 start_time=start_time,
-                 end_time=end_time,
+                 error=str(exc),
+                 start_time=start,
+                 end_time=datetime.now(timezone.utc),
                  machine=machine,
-                 pid=pid
-             )
+                 pid=pid,
+             )
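
For context, a minimal sketch (not part of the diff; GreetTool and pick_entry_point are hypothetical stand-ins) of why preferring the public execute() wrapper over the private _execute() fixes missing-argument errors when the wrapper supplies defaults:

import inspect


class GreetTool:
    # Public wrapper: supplies defaults (and, in ValidatedTool, validation)
    # before delegating to the private implementation.
    def execute(self, name: str, greeting: str = "Hello") -> str:
        return self._execute(name=name, greeting=greeting)

    # Private implementation: every argument must already be present.
    def _execute(self, name: str, greeting: str) -> str:
        return f"{greeting}, {name}!"


def pick_entry_point(tool):
    # Mirrors the strategy's selection order: _aexecute, then execute, then _execute.
    if hasattr(tool, "_aexecute") and inspect.iscoroutinefunction(tool._aexecute):
        return tool._aexecute
    if hasattr(tool, "execute"):
        return tool.execute
    return tool._execute


fn = pick_entry_point(GreetTool())
print(fn(name="Ada"))  # Hello, Ada!
# Calling GreetTool()._execute(name="Ada") directly would raise TypeError,
# which is the failure mode the new entry-point order avoids.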
chuk_tool_processor/execution/strategies/subprocess_strategy.py
@@ -12,7 +12,7 @@ from concurrent.futures import ProcessPoolExecutor
  from chuk_tool_processor.models.execution_strategy import ExecutionStrategy
  from chuk_tool_processor.models.tool_call import ToolCall
  from chuk_tool_processor.models.tool_result import ToolResult
- from chuk_tool_processor.utils.logging import get_logger
+ from chuk_tool_processor.logging import get_logger

  logger = get_logger("chuk_tool_processor.execution.subprocess_strategy")

chuk_tool_processor/logging/__init__.py
@@ -0,0 +1,35 @@
+ # chuk_tool_processor/logging/__init__.py
+ """
+ Public façade for chuk_tool_processor structured logging.
+
+ Other modules can continue to import:
+
+     from chuk_tool_processor.logging import get_logger, log_context_span, ...
+ """
+
+ from __future__ import annotations
+ import logging
+ import sys
+
+ from .formatter import StructuredFormatter
+ from .context import get_logger, log_context, StructuredAdapter
+ from .helpers import log_context_span, request_logging, log_tool_call, metrics
+
+ __all__ = [
+     "get_logger",
+     "log_context_span",
+     "request_logging",
+     "log_tool_call",
+     "metrics",
+ ]
+
+ # --------------------------------------------------------------------------- #
+ # root logger & handler wiring (done once at import time)
+ # --------------------------------------------------------------------------- #
+ root_logger = logging.getLogger("chuk_tool_processor")
+ root_logger.setLevel(logging.INFO)
+
+ _handler = logging.StreamHandler(sys.stderr)
+ _handler.setLevel(logging.INFO)
+ _handler.setFormatter(StructuredFormatter())
+ root_logger.addHandler(_handler)
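
Importing this package wires a StructuredFormatter handler onto the chuk_tool_processor root logger, so downstream modules only need the façade import. A short usage sketch (the logger name and message are illustrative):

from chuk_tool_processor.logging import get_logger

logger = get_logger("chuk_tool_processor.example")

# Handled by the stderr handler installed at import time and rendered as a
# JSON record by StructuredFormatter (timestamp, level, message, logger, ...).
logger.info("processor initialised")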
chuk_tool_processor/logging/context.py
@@ -0,0 +1,47 @@
+ # chuk_tool_processor/logging/context.py
+ from __future__ import annotations
+ import logging
+ import uuid
+ from typing import Any, Dict, Optional
+
+ __all__ = ["log_context", "StructuredAdapter", "get_logger"]
+
+
+ class LogContext:
+     """Thread-local dict for request / span ids."""
+
+     def __init__(self):
+         self.context: Dict[str, Any] = {}
+         self.request_id: str | None = None
+
+     # simple helpers ----------------------------------------------------
+     def update(self, kv: Dict[str, Any]): self.context.update(kv)
+     def clear(self): self.context.clear()
+     def get_copy(self) -> Dict[str, Any]: return self.context.copy()
+
+     # convenience -------------------------------------------------------
+     def start_request(self, request_id: str | None = None) -> str:
+         self.request_id = request_id or str(uuid.uuid4())
+         self.context["request_id"] = self.request_id
+         return self.request_id
+
+     def end_request(self): self.clear()
+
+
+ log_context = LogContext()
+
+
+ class StructuredAdapter(logging.LoggerAdapter):
+     """Inject `log_context.context` into every log record."""
+
+     def process(self, msg, kwargs):
+         kwargs = kwargs or {}
+         extra = kwargs.get("extra", {})
+         if log_context.context:
+             extra.setdefault("context", {}).update(log_context.get_copy())
+         kwargs["extra"] = extra
+         return msg, kwargs
+
+
+ def get_logger(name: str) -> StructuredAdapter:
+     return StructuredAdapter(logging.getLogger(name), {})
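
A small sketch of how the adapter and the shared context interact (the keys and values below are illustrative): whatever sits in log_context.context is copied into each record's extra["context"] by StructuredAdapter.process().

from chuk_tool_processor.logging.context import get_logger, log_context

log_context.update({"request_id": "req-123", "user": "demo"})

logger = get_logger("chuk_tool_processor.example")
# StructuredAdapter.process() merges log_context.context into this record,
# so the emitted JSON carries request_id and user alongside the message.
logger.info("handling call")

log_context.clear()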
chuk_tool_processor/logging/formatter.py
@@ -0,0 +1,55 @@
+ # chuk_tool_processor/logging/formatter.py
+ from __future__ import annotations
+ import json
+ import logging
+ from datetime import datetime, timezone
+ from typing import Any
+
+ __all__ = ["StructuredFormatter"]
+
+
+ class StructuredFormatter(logging.Formatter):
+     """
+     JSON formatter that can serialise BaseModels, datetimes, sets, etc.
+     """
+
+     @staticmethod
+     def _json_default(obj: Any):
+         # pydantic models → dict
+         try:
+             from pydantic import BaseModel
+             if isinstance(obj, BaseModel):
+                 return obj.model_dump()
+         except ImportError:
+             pass
+         # datetimes → ISO
+         from datetime import date
+         if isinstance(obj, (datetime, date)):
+             return obj.isoformat()
+         # sets → list
+         if isinstance(obj, (set, frozenset)):
+             return list(obj)
+         # fall back
+         return str(obj)
+
+     def format(self, record: logging.LogRecord) -> str:  # noqa: D401
+         data = {
+             "timestamp": datetime.fromtimestamp(record.created, timezone.utc)
+             .isoformat()
+             .replace("+00:00", "Z"),
+             "level": record.levelname,
+             "message": record.getMessage(),
+             "logger": record.name,
+             "pid": record.process,
+             "thread": record.thread,
+             "file": record.filename,
+             "line": record.lineno,
+             "function": record.funcName,
+         }
+         if record.exc_info:
+             data["traceback"] = self.formatException(record.exc_info)
+         if hasattr(record, "extra"):
+             data.update(record.extra)
+         if hasattr(record, "context"):
+             data["context"] = record.context
+         return json.dumps(data, default=self._json_default)
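
A standalone sketch of the formatter's fallback serialisation (the logger name and payload are made up; the exact output varies by runtime): datetimes and sets attached via extra are routed through _json_default.

import logging
from datetime import datetime, timezone

from chuk_tool_processor.logging.formatter import StructuredFormatter

handler = logging.StreamHandler()
handler.setFormatter(StructuredFormatter())

demo = logging.getLogger("formatter_demo")
demo.addHandler(handler)
demo.setLevel(logging.INFO)

# The datetime becomes an ISO string and the set becomes a list when the
# record is dumped to JSON, courtesy of _json_default.
demo.info("demo", extra={"context": {"when": datetime.now(timezone.utc), "tags": {"a", "b"}}})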
chuk_tool_processor/logging/helpers.py
@@ -0,0 +1,112 @@
+ # chuk_tool_processor/logging/helpers.py
+ from __future__ import annotations
+ import time
+ import uuid
+ from contextlib import contextmanager
+ from datetime import datetime, timezone
+ from typing import Dict, Optional
+
+ from .context import get_logger, log_context
+ from .metrics import metrics  # re-export convenience
+
+ __all__ = [
+     "log_context_span",
+     "request_logging",
+     "log_tool_call",
+     "metrics",
+ ]
+
+ # --------------------------------------------------------------------------- #
+ # context-manager helpers
+ # --------------------------------------------------------------------------- #
+ @contextmanager
+ def log_context_span(operation: str, extra: Dict | None = None, *, log_duration=True):
+     logger = get_logger(f"chuk_tool_processor.span.{operation}")
+     start = time.time()
+     span_id = str(uuid.uuid4())
+     span_ctx = {
+         "span_id": span_id,
+         "operation": operation,
+         "start_time": datetime.fromtimestamp(start, timezone.utc)
+         .isoformat()
+         .replace("+00:00", "Z"),
+     }
+     if extra:
+         span_ctx.update(extra)
+     prev = log_context.get_copy()
+     log_context.update(span_ctx)
+
+     logger.info("Starting %s", operation)
+     try:
+         yield
+         if log_duration:
+             logger.info(
+                 "Completed %s", operation, extra={"context": {"duration": time.time() - start}}
+             )
+         else:
+             logger.info("Completed %s", operation)
+     except Exception as exc:
+         logger.exception(
+             "Error in %s: %s", operation, exc, extra={"context": {"duration": time.time() - start}}
+         )
+         raise
+     finally:
+         log_context.clear()
+         if prev:
+             log_context.update(prev)
+
+
+ @contextmanager
+ def request_logging(request_id: str | None = None):
+     logger = get_logger("chuk_tool_processor.request")
+     request_id = log_context.start_request(request_id)
+     start = time.time()
+     logger.info("Starting request %s", request_id)
+     try:
+         yield request_id
+         logger.info(
+             "Completed request %s",
+             request_id,
+             extra={"context": {"duration": time.time() - start}},
+         )
+     except Exception as exc:
+         logger.exception(
+             "Error in request %s: %s",
+             request_id,
+             exc,
+             extra={"context": {"duration": time.time() - start}},
+         )
+         raise
+     finally:
+         log_context.end_request()
+
+
+ # --------------------------------------------------------------------------- #
+ # high-level helper
+ # --------------------------------------------------------------------------- #
+ def log_tool_call(tool_call, tool_result):
+     logger = get_logger("chuk_tool_processor.tool_call")
+     dur = (tool_result.end_time - tool_result.start_time).total_seconds()
+
+     ctx = {
+         "tool": tool_call.tool,
+         "arguments": tool_call.arguments,
+         "result": (
+             tool_result.result.model_dump()
+             if hasattr(tool_result.result, "model_dump")
+             else tool_result.result
+         ),
+         "error": tool_result.error,
+         "duration": dur,
+         "machine": tool_result.machine,
+         "pid": tool_result.pid,
+     }
+     if getattr(tool_result, "cached", False):
+         ctx["cached"] = True
+     if getattr(tool_result, "attempts", 0):
+         ctx["attempts"] = tool_result.attempts
+
+     if tool_result.error:
+         logger.error("Tool %s failed: %s", tool_call.tool, tool_result.error, extra={"context": ctx})
+     else:
+         logger.info("Tool %s succeeded in %.3fs", tool_call.tool, dur, extra={"context": ctx})
chuk_tool_processor/logging/metrics.py
@@ -0,0 +1,59 @@
+ # chuk_tool_processor/logging/metrics.py
+ from __future__ import annotations
+ from .context import get_logger
+
+ __all__ = ["metrics", "MetricsLogger"]
+
+
+ class MetricsLogger:
+     def __init__(self):
+         self.logger = get_logger("chuk_tool_processor.metrics")
+
+     # ------------------------------------------------------------------
+     def log_tool_execution(
+         self,
+         tool: str,
+         success: bool,
+         duration: float,
+         *,
+         error: str | None = None,
+         cached: bool = False,
+         attempts: int = 1,
+     ):
+         self.logger.info(
+             f"Tool execution metric: {tool}",
+             extra={
+                 "context": {
+                     "metric_type": "tool_execution",
+                     "tool": tool,
+                     "success": success,
+                     "duration": duration,
+                     "error": error,
+                     "cached": cached,
+                     "attempts": attempts,
+                 }
+             },
+         )
+
+     def log_parser_metric(
+         self,
+         parser: str,
+         success: bool,
+         duration: float,
+         num_calls: int,
+     ):
+         self.logger.info(
+             f"Parser metric: {parser}",
+             extra={
+                 "context": {
+                     "metric_type": "parser",
+                     "parser": parser,
+                     "success": success,
+                     "duration": duration,
+                     "num_calls": num_calls,
+                 }
+             },
+         )
+
+
+ metrics = MetricsLogger()
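
The metrics singleton is used by calling its log methods directly; a sketch with made-up numbers (the tool and parser names are illustrative):

from chuk_tool_processor.logging import metrics

# Each call emits one structured record whose context carries metric_type
# "tool_execution" or "parser" plus the fields shown in the class above.
metrics.log_tool_execution("search", success=True, duration=0.42, cached=False, attempts=1)
metrics.log_parser_metric("json_tool", success=True, duration=0.003, num_calls=2)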
chuk_tool_processor/models/execution_strategy.py
@@ -1,4 +1,4 @@
- # chuk_tool_processor/execution/execution_strategy.py
+ # chuk_tool_processor/models/execution_strategy.py
  from abc import ABC, abstractmethod
  from typing import List, Optional