tactus-0.31.0-py3-none-any.whl
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tactus/__init__.py +49 -0
- tactus/adapters/__init__.py +9 -0
- tactus/adapters/broker_log.py +76 -0
- tactus/adapters/cli_hitl.py +189 -0
- tactus/adapters/cli_log.py +223 -0
- tactus/adapters/cost_collector_log.py +56 -0
- tactus/adapters/file_storage.py +367 -0
- tactus/adapters/http_callback_log.py +109 -0
- tactus/adapters/ide_log.py +71 -0
- tactus/adapters/lua_tools.py +336 -0
- tactus/adapters/mcp.py +289 -0
- tactus/adapters/mcp_manager.py +196 -0
- tactus/adapters/memory.py +53 -0
- tactus/adapters/plugins.py +419 -0
- tactus/backends/http_backend.py +58 -0
- tactus/backends/model_backend.py +35 -0
- tactus/backends/pytorch_backend.py +110 -0
- tactus/broker/__init__.py +12 -0
- tactus/broker/client.py +247 -0
- tactus/broker/protocol.py +183 -0
- tactus/broker/server.py +1123 -0
- tactus/broker/stdio.py +12 -0
- tactus/cli/__init__.py +7 -0
- tactus/cli/app.py +2245 -0
- tactus/cli/commands/__init__.py +0 -0
- tactus/core/__init__.py +32 -0
- tactus/core/config_manager.py +790 -0
- tactus/core/dependencies/__init__.py +14 -0
- tactus/core/dependencies/registry.py +180 -0
- tactus/core/dsl_stubs.py +2117 -0
- tactus/core/exceptions.py +66 -0
- tactus/core/execution_context.py +480 -0
- tactus/core/lua_sandbox.py +508 -0
- tactus/core/message_history_manager.py +236 -0
- tactus/core/mocking.py +286 -0
- tactus/core/output_validator.py +291 -0
- tactus/core/registry.py +499 -0
- tactus/core/runtime.py +2907 -0
- tactus/core/template_resolver.py +142 -0
- tactus/core/yaml_parser.py +301 -0
- tactus/docker/Dockerfile +61 -0
- tactus/docker/entrypoint.sh +69 -0
- tactus/dspy/__init__.py +39 -0
- tactus/dspy/agent.py +1144 -0
- tactus/dspy/broker_lm.py +181 -0
- tactus/dspy/config.py +212 -0
- tactus/dspy/history.py +196 -0
- tactus/dspy/module.py +405 -0
- tactus/dspy/prediction.py +318 -0
- tactus/dspy/signature.py +185 -0
- tactus/formatting/__init__.py +7 -0
- tactus/formatting/formatter.py +437 -0
- tactus/ide/__init__.py +9 -0
- tactus/ide/coding_assistant.py +343 -0
- tactus/ide/server.py +2223 -0
- tactus/primitives/__init__.py +49 -0
- tactus/primitives/control.py +168 -0
- tactus/primitives/file.py +229 -0
- tactus/primitives/handles.py +378 -0
- tactus/primitives/host.py +94 -0
- tactus/primitives/human.py +342 -0
- tactus/primitives/json.py +189 -0
- tactus/primitives/log.py +187 -0
- tactus/primitives/message_history.py +157 -0
- tactus/primitives/model.py +163 -0
- tactus/primitives/procedure.py +564 -0
- tactus/primitives/procedure_callable.py +318 -0
- tactus/primitives/retry.py +155 -0
- tactus/primitives/session.py +152 -0
- tactus/primitives/state.py +182 -0
- tactus/primitives/step.py +209 -0
- tactus/primitives/system.py +93 -0
- tactus/primitives/tool.py +375 -0
- tactus/primitives/tool_handle.py +279 -0
- tactus/primitives/toolset.py +229 -0
- tactus/protocols/__init__.py +38 -0
- tactus/protocols/chat_recorder.py +81 -0
- tactus/protocols/config.py +97 -0
- tactus/protocols/cost.py +31 -0
- tactus/protocols/hitl.py +71 -0
- tactus/protocols/log_handler.py +27 -0
- tactus/protocols/models.py +355 -0
- tactus/protocols/result.py +33 -0
- tactus/protocols/storage.py +90 -0
- tactus/providers/__init__.py +13 -0
- tactus/providers/base.py +92 -0
- tactus/providers/bedrock.py +117 -0
- tactus/providers/google.py +105 -0
- tactus/providers/openai.py +98 -0
- tactus/sandbox/__init__.py +63 -0
- tactus/sandbox/config.py +171 -0
- tactus/sandbox/container_runner.py +1099 -0
- tactus/sandbox/docker_manager.py +433 -0
- tactus/sandbox/entrypoint.py +227 -0
- tactus/sandbox/protocol.py +213 -0
- tactus/stdlib/__init__.py +10 -0
- tactus/stdlib/io/__init__.py +13 -0
- tactus/stdlib/io/csv.py +88 -0
- tactus/stdlib/io/excel.py +136 -0
- tactus/stdlib/io/file.py +90 -0
- tactus/stdlib/io/fs.py +154 -0
- tactus/stdlib/io/hdf5.py +121 -0
- tactus/stdlib/io/json.py +109 -0
- tactus/stdlib/io/parquet.py +83 -0
- tactus/stdlib/io/tsv.py +88 -0
- tactus/stdlib/loader.py +274 -0
- tactus/stdlib/tac/tactus/tools/done.tac +33 -0
- tactus/stdlib/tac/tactus/tools/log.tac +50 -0
- tactus/testing/README.md +273 -0
- tactus/testing/__init__.py +61 -0
- tactus/testing/behave_integration.py +380 -0
- tactus/testing/context.py +486 -0
- tactus/testing/eval_models.py +114 -0
- tactus/testing/evaluation_runner.py +222 -0
- tactus/testing/evaluators.py +634 -0
- tactus/testing/events.py +94 -0
- tactus/testing/gherkin_parser.py +134 -0
- tactus/testing/mock_agent.py +315 -0
- tactus/testing/mock_dependencies.py +234 -0
- tactus/testing/mock_hitl.py +171 -0
- tactus/testing/mock_registry.py +168 -0
- tactus/testing/mock_tools.py +133 -0
- tactus/testing/models.py +115 -0
- tactus/testing/pydantic_eval_runner.py +508 -0
- tactus/testing/steps/__init__.py +13 -0
- tactus/testing/steps/builtin.py +902 -0
- tactus/testing/steps/custom.py +69 -0
- tactus/testing/steps/registry.py +68 -0
- tactus/testing/test_runner.py +489 -0
- tactus/tracing/__init__.py +5 -0
- tactus/tracing/trace_manager.py +417 -0
- tactus/utils/__init__.py +1 -0
- tactus/utils/cost_calculator.py +72 -0
- tactus/utils/model_pricing.py +132 -0
- tactus/utils/safe_file_library.py +502 -0
- tactus/utils/safe_libraries.py +234 -0
- tactus/validation/LuaLexerBase.py +66 -0
- tactus/validation/LuaParserBase.py +23 -0
- tactus/validation/README.md +224 -0
- tactus/validation/__init__.py +7 -0
- tactus/validation/error_listener.py +21 -0
- tactus/validation/generated/LuaLexer.interp +231 -0
- tactus/validation/generated/LuaLexer.py +5548 -0
- tactus/validation/generated/LuaLexer.tokens +124 -0
- tactus/validation/generated/LuaLexerBase.py +66 -0
- tactus/validation/generated/LuaParser.interp +173 -0
- tactus/validation/generated/LuaParser.py +6439 -0
- tactus/validation/generated/LuaParser.tokens +124 -0
- tactus/validation/generated/LuaParserBase.py +23 -0
- tactus/validation/generated/LuaParserVisitor.py +118 -0
- tactus/validation/generated/__init__.py +7 -0
- tactus/validation/grammar/LuaLexer.g4 +123 -0
- tactus/validation/grammar/LuaParser.g4 +178 -0
- tactus/validation/semantic_visitor.py +817 -0
- tactus/validation/validator.py +157 -0
- tactus-0.31.0.dist-info/METADATA +1809 -0
- tactus-0.31.0.dist-info/RECORD +160 -0
- tactus-0.31.0.dist-info/WHEEL +4 -0
- tactus-0.31.0.dist-info/entry_points.txt +2 -0
- tactus-0.31.0.dist-info/licenses/LICENSE +21 -0
tactus/adapters/mcp_manager.py
@@ -0,0 +1,196 @@
+"""
+MCP Server Manager for Tactus.
+
+Manages multiple MCP server connections using Pydantic AI's native MCPServerStdio.
+Handles lifecycle, tool prefixing, and tool call tracking.
+"""
+
+import logging
+import os
+import re
+import asyncio
+from contextlib import AsyncExitStack
+from typing import Dict, Any, List
+
+from pydantic_ai.mcp import MCPServerStdio
+
+logger = logging.getLogger(__name__)
+
+
+def substitute_env_vars(value: Any) -> Any:
+    """
+    Replace ${VAR} with environment variable values.
+
+    Args:
+        value: Value to process (can be str, dict, list, or other)
+
+    Returns:
+        Value with environment variables substituted
+    """
+    if isinstance(value, str):
+        # Replace ${VAR} or $VAR with environment variable value
+        return re.sub(r"\$\{(\w+)\}", lambda m: os.getenv(m.group(1), ""), value)
+    elif isinstance(value, dict):
+        return {k: substitute_env_vars(v) for k, v in value.items()}
+    elif isinstance(value, list):
+        return [substitute_env_vars(v) for v in value]
+    return value
+
+
+class MCPServerManager:
+    """
+    Manages multiple native Pydantic AI MCP servers.
+
+    Uses Pydantic AI's MCPServerStdio for stdio transport and automatic
+    tool prefixing. Handles connection lifecycle and tool call tracking.
+    """
+
+    def __init__(self, server_configs: Dict[str, Dict[str, Any]], tool_primitive=None):
+        """
+        Initialize MCP server manager.
+
+        Args:
+            server_configs: Dict of {server_name: {command, args, env}}
+            tool_primitive: Optional ToolPrimitive for recording tool calls
+        """
+        self.configs = server_configs
+        self.tool_primitive = tool_primitive
+        self.servers: List[MCPServerStdio] = []
+        self.server_toolsets: Dict[str, MCPServerStdio] = {}  # Map server names to toolsets
+        self._exit_stack = AsyncExitStack()
+        logger.info(f"MCPServerManager initialized with {len(server_configs)} server(s)")
+
+    async def __aenter__(self):
+        """Connect to all configured MCP servers."""
+        for name, config in self.configs.items():
+            # Retry a few times for transient stdio startup issues.
+            last_error: Exception | None = None
+            for attempt in range(1, 4):
+                try:
+                    logger.info(f"Connecting to MCP server '{name}' (attempt {attempt}/3)...")
+
+                    # Substitute environment variables in config
+                    config = substitute_env_vars(config)
+
+                    # Create base server
+                    server = MCPServerStdio(
+                        command=config["command"],
+                        args=config.get("args", []),
+                        env=config.get("env"),
+                        cwd=config.get("cwd"),
+                        process_tool_call=self._create_trace_callback(name),  # Tracking hook
+                    )
+
+                    # Wrap with prefix to namespace tools
+                    prefixed_server = server.prefixed(name)
+
+                    # Connect the prefixed server
+                    await self._exit_stack.enter_async_context(prefixed_server)
+                    self.servers.append(prefixed_server)
+                    self.server_toolsets[name] = prefixed_server  # Store by name for lookup
+                    logger.info(
+                        f"Successfully connected to MCP server '{name}' with prefix '{name}_'"
+                    )
+                    last_error = None
+                    break
+                except Exception as e:
+                    last_error = e
+
+                    # Check if this is a fileno error (common in test environments)
+                    import io
+
+                    error_str = str(e)
+                    if "fileno" in error_str or isinstance(e, io.UnsupportedOperation):
+                        logger.warning(
+                            f"Failed to connect to MCP server '{name}': {e} "
+                            f"(test environment with redirected streams)"
+                        )
+                        # Allow procedures to continue without MCP in this environment.
+                        last_error = None
+                        break
+
+                    # Retry transient anyio TaskGroup/broken stream issues.
+                    if (
+                        "BrokenResourceError" in error_str
+                        or "unhandled errors in a TaskGroup" in error_str
+                    ):
+                        logger.warning(
+                            f"Transient MCP connection failure for '{name}': {e} (retrying)"
+                        )
+                        await asyncio.sleep(0.05 * attempt)
+                        continue
+
+                    logger.error(f"Failed to connect to MCP server '{name}': {e}", exc_info=True)
+                    break
+
+            if last_error is not None:
+                # For non-transient failures, raise so callers can decide whether to ignore.
+                raise last_error
+
+        return self
+
+    async def __aexit__(self, exc_type, exc_val, exc_tb):
+        """Disconnect from all MCP servers."""
+        logger.info("Disconnecting from all MCP servers...")
+        await self._exit_stack.aclose()
+        logger.info("All MCP servers disconnected")
+
+    def _create_trace_callback(self, server_name: str):
+        """
+        Create a tool call tracing callback for a specific server.
+
+        Args:
+            server_name: Name of the MCP server
+
+        Returns:
+            Async callback function for process_tool_call
+        """
+
+        async def trace_tool_call(ctx, next_call, tool_name, tool_args):
+            """Middleware to record tool calls in Tactus ToolPrimitive."""
+            logger.debug(
+                f"MCP server '{server_name}' calling tool '{tool_name}' with args: {tool_args}"
+            )
+
+            try:
+                result = await next_call(tool_name, tool_args)
+
+                # Record in ToolPrimitive if available
+                if self.tool_primitive:
+                    # Convert result to string for consistency with old behavior
+                    # Pydantic AI tools can return various types
+                    result_str = str(result) if not isinstance(result, str) else result
+                    self.tool_primitive.record_call(tool_name, tool_args, result_str)
+
+                logger.debug(f"Tool '{tool_name}' completed successfully")
+                return result
+            except Exception as e:
+                logger.error(f"Tool '{tool_name}' failed: {e}", exc_info=True)
+                # Still record the failed call
+                if self.tool_primitive:
+                    error_msg = f"Error: {str(e)}"
+                    self.tool_primitive.record_call(tool_name, tool_args, error_msg)
+                raise
+
+        return trace_tool_call
+
+    def get_toolsets(self) -> List[MCPServerStdio]:
+        """
+        Return list of connected servers as toolsets.
+
+        Returns:
+            List of MCPServerStdio instances (which are AbstractToolset)
+        """
+        return self.servers
+
+    def get_toolset_by_name(self, server_name: str):
+        """
+        Get a specific toolset by server name.
+
+        Args:
+            server_name: Name of the MCP server
+
+        Returns:
+            MCPServerStdio instance for the named server, or None if not found
+        """
+        return self.server_toolsets.get(server_name)
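A minimal usage sketch for MCPServerManager (not part of the package diff; the "github" server entry, its npx command, and the GITHUB_TOKEN variable are illustrative assumptions):

import asyncio

from tactus.adapters.mcp_manager import MCPServerManager

# Hypothetical server config; keys follow the {command, args, env} shape documented
# in MCPServerManager.__init__. ${GITHUB_TOKEN} is expanded by substitute_env_vars()
# at connect time.
CONFIGS = {
    "github": {
        "command": "npx",
        "args": ["-y", "@modelcontextprotocol/server-github"],
        "env": {"GITHUB_TOKEN": "${GITHUB_TOKEN}"},
    }
}


async def main() -> None:
    # __aenter__ connects every configured server and prefixes its tools.
    async with MCPServerManager(CONFIGS) as manager:
        toolsets = manager.get_toolsets()                 # all connected servers
        github = manager.get_toolset_by_name("github")    # one server, or None
        print(len(toolsets), github is not None)
    # __aexit__ closes the AsyncExitStack and disconnects everything.


if __name__ == "__main__":
    asyncio.run(main())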
tactus/adapters/memory.py
@@ -0,0 +1,53 @@
+"""
+In-memory storage backend for Tactus.
+
+Simple implementation that stores all data in memory (RAM).
+Useful for testing and simple CLI workflows that don't need persistence.
+"""
+
+from typing import Optional, Any, Dict
+
+from tactus.protocols.models import ProcedureMetadata
+
+
+class MemoryStorage:
+    """
+    In-memory storage backend.
+
+    All data stored in Python dicts - lost when process exits.
+    """
+
+    def __init__(self):
+        """Initialize in-memory storage."""
+        self._procedures: Dict[str, ProcedureMetadata] = {}
+
+    def load_procedure_metadata(self, procedure_id: str) -> ProcedureMetadata:
+        """Load procedure metadata from memory."""
+        if procedure_id not in self._procedures:
+            # Create new metadata if doesn't exist
+            self._procedures[procedure_id] = ProcedureMetadata(procedure_id=procedure_id)
+        return self._procedures[procedure_id]
+
+    def save_procedure_metadata(self, procedure_id: str, metadata: ProcedureMetadata) -> None:
+        """Save procedure metadata to memory."""
+        self._procedures[procedure_id] = metadata
+
+    def update_procedure_status(
+        self, procedure_id: str, status: str, waiting_on_message_id: Optional[str] = None
+    ) -> None:
+        """Update procedure status."""
+        metadata = self.load_procedure_metadata(procedure_id)
+        metadata.status = status
+        metadata.waiting_on_message_id = waiting_on_message_id
+        self.save_procedure_metadata(procedure_id, metadata)
+
+    def get_state(self, procedure_id: str) -> Dict[str, Any]:
+        """Get mutable state dictionary."""
+        metadata = self.load_procedure_metadata(procedure_id)
+        return metadata.state
+
+    def set_state(self, procedure_id: str, state: Dict[str, Any]) -> None:
+        """Set mutable state dictionary."""
+        metadata = self.load_procedure_metadata(procedure_id)
+        metadata.state = state
+        self.save_procedure_metadata(procedure_id, metadata)
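A minimal usage sketch for MemoryStorage (not part of the package diff; the procedure id and state values are arbitrary):

from tactus.adapters.memory import MemoryStorage

storage = MemoryStorage()

# The first load lazily creates a ProcedureMetadata record for this id.
meta = storage.load_procedure_metadata("proc-123")

# Status and state round-trip through the same in-memory dict.
storage.update_procedure_status("proc-123", "running")
storage.set_state("proc-123", {"step": 1})

print(storage.get_state("proc-123"))                        # {'step': 1}
print(storage.load_procedure_metadata("proc-123").status)   # 'running'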
tactus/adapters/plugins.py
@@ -0,0 +1,419 @@
+"""
+Local Python Plugin Loader for Tactus.
+
+Provides lightweight tool loading from local Python files without requiring MCP servers.
+"""
+
+import logging
+import importlib.util
+import inspect
+import sys
+from pathlib import Path
+from typing import List, Any, Optional, Callable
+from pydantic_ai import Tool
+from pydantic_ai.toolsets import FunctionToolset
+
+logger = logging.getLogger(__name__)
+
+
+class PluginLoader:
+    """
+    Loader for local Python function tools.
+
+    Scans specified directories and files for Python functions and converts them
+    to Pydantic AI Tool instances.
+
+    Convention: Any public function (not starting with _) in the specified paths
+    is automatically loaded as a tool. The function's docstring becomes the tool
+    description, and type hints are used for parameter validation.
+    """
+
+    def __init__(self, tool_primitive: Optional[Any] = None):
+        """
+        Initialize plugin loader.
+
+        Args:
+            tool_primitive: Optional ToolPrimitive for recording tool calls
+        """
+        self.tool_primitive = tool_primitive
+        self.loaded_modules = {}  # Cache loaded modules
+        logger.debug("PluginLoader initialized")
+
+    def create_toolset(self, paths: List[str], name: str = "plugin") -> FunctionToolset:
+        """
+        Create a FunctionToolset from specified paths.
+
+        This is the preferred method for loading plugin tools - it returns a composable
+        toolset that can be combined with other toolsets.
+
+        Args:
+            paths: List of directory paths or file paths to scan
+            name: Name for the toolset (used in logging)
+
+        Returns:
+            FunctionToolset instance containing all loaded tools
+        """
+        # Load all functions from paths
+        functions = self._load_all_functions(paths)
+
+        if not functions:
+            logger.warning(f"No functions found in paths: {paths}")
+            # Return empty toolset
+            return FunctionToolset(tools=[])
+
+        # Create toolset
+        # Note: FunctionToolset doesn't support process_tool_call parameter
+        # Tool call tracking needs to be done at Agent level
+        toolset = FunctionToolset(tools=functions)
+
+        logger.info(f"Created FunctionToolset '{name}' with {len(functions)} tool(s)")
+        return toolset
+
+    def load_from_paths(self, paths: List[str]) -> List[Tool]:
+        """
+        Load tools from specified paths (directories or files).
+
+        DEPRECATED: Use create_toolset() instead. This method is kept for backward
+        compatibility but will be removed in a future version.
+
+        Args:
+            paths: List of directory paths or file paths to scan
+
+        Returns:
+            List of pydantic_ai.Tool instances
+        """
+        all_tools = []
+
+        for path_str in paths:
+            path = Path(path_str).resolve()
+
+            if not path.exists():
+                logger.warning(f"Tool path does not exist: {path}")
+                continue
+
+            if path.is_file():
+                # Load tools from single file
+                if path.suffix == ".py":
+                    tools = self._load_tools_from_file(path)
+                    all_tools.extend(tools)
+                else:
+                    logger.warning(f"Skipping non-Python file: {path}")
+            elif path.is_dir():
+                # Scan directory for Python files
+                tools = self._load_tools_from_directory(path)
+                all_tools.extend(tools)
+            else:
+                logger.warning(f"Path is neither file nor directory: {path}")
+
+        logger.info(f"Loaded {len(all_tools)} tools from {len(paths)} path(s)")
+        return all_tools
+
+    def _load_all_functions(self, paths: List[str]) -> List[Callable]:
+        """
+        Load all functions from specified paths.
+
+        Args:
+            paths: List of directory paths or file paths to scan
+
+        Returns:
+            List of callable functions (not wrapped in Tool)
+        """
+        all_functions = []
+
+        for path_str in paths:
+            path = Path(path_str).resolve()
+
+            if not path.exists():
+                logger.warning(f"Tool path does not exist: {path}")
+                continue
+
+            if path.is_file():
+                if path.suffix == ".py":
+                    functions = self._load_functions_from_file(path)
+                    all_functions.extend(functions)
+                else:
+                    logger.warning(f"Skipping non-Python file: {path}")
+            elif path.is_dir():
+                functions = self._load_functions_from_directory(path)
+                all_functions.extend(functions)
+            else:
+                logger.warning(f"Path is neither file nor directory: {path}")
+
+        logger.debug(f"Loaded {len(all_functions)} function(s) from {len(paths)} path(s)")
+        return all_functions
+
+    def _load_functions_from_directory(self, directory: Path) -> List[Callable]:
+        """
+        Scan directory for Python files and load functions.
+
+        Args:
+            directory: Directory path to scan
+
+        Returns:
+            List of callable functions
+        """
+        functions = []
+
+        for py_file in directory.glob("*.py"):
+            if py_file.name.startswith("_"):
+                continue
+
+            file_functions = self._load_functions_from_file(py_file)
+            functions.extend(file_functions)
+
+        return functions
+
+    def _load_functions_from_file(self, file_path: Path) -> List[Callable]:
+        """
+        Load functions from a single Python file.
+
+        Args:
+            file_path: Path to Python file
+
+        Returns:
+            List of callable functions
+        """
+        functions = []
+
+        try:
+            # Create module name from file path
+            module_name = f"tactus_plugin_{file_path.stem}_{id(file_path)}"
+
+            # Load module dynamically
+            spec = importlib.util.spec_from_file_location(module_name, file_path)
+            if spec is None or spec.loader is None:
+                logger.error(f"Could not load spec for {file_path}")
+                return functions
+
+            module = importlib.util.module_from_spec(spec)
+            sys.modules[module_name] = module
+            spec.loader.exec_module(module)
+
+            # Cache the module
+            self.loaded_modules[str(file_path)] = module
+
+            logger.debug(f"Loaded module from {file_path}")
+
+            # Find all public functions in the module
+            for name, obj in inspect.getmembers(module):
+                if self._is_valid_tool_function(name, obj, module):
+                    functions.append(obj)
+                    logger.debug(f"Found function '{name}' in {file_path.name}")
+
+        except Exception as e:
+            logger.error(f"Failed to load functions from {file_path}: {e}", exc_info=True)
+
+        return functions
+
+    def _create_trace_callback(self, toolset_name: str):
+        """
+        Create a tool call tracing callback for the toolset.
+
+        Args:
+            toolset_name: Name of the toolset
+
+        Returns:
+            Async callback function for process_tool_call
+        """
+
+        async def trace_tool_call(ctx, next_call, tool_name, tool_args):
+            """Middleware to record tool calls in Tactus ToolPrimitive."""
+            logger.debug(
+                f"Toolset '{toolset_name}' calling tool '{tool_name}' with args: {tool_args}"
+            )
+
+            try:
+                result = await next_call(tool_name, tool_args)
+
+                # Record in ToolPrimitive if available
+                if self.tool_primitive:
+                    result_str = str(result) if not isinstance(result, str) else result
+                    self.tool_primitive.record_call(tool_name, tool_args, result_str)
+
+                logger.debug(f"Tool '{tool_name}' completed successfully")
+                return result
+            except Exception as e:
+                logger.error(f"Tool '{tool_name}' failed: {e}", exc_info=True)
+                # Still record the failed call
+                if self.tool_primitive:
+                    error_msg = f"Error: {str(e)}"
+                    self.tool_primitive.record_call(tool_name, tool_args, error_msg)
+                raise
+
+        return trace_tool_call
+
+    def _load_tools_from_directory(self, directory: Path) -> List[Tool]:
+        """
+        Recursively scan directory for Python files and load tools.
+
+        Args:
+            directory: Directory path to scan
+
+        Returns:
+            List of Tool instances
+        """
+        tools = []
+
+        # Find all .py files (non-recursive for now)
+        for py_file in directory.glob("*.py"):
+            if py_file.name.startswith("_"):
+                # Skip private modules (e.g., __init__.py, __pycache__)
+                continue
+
+            file_tools = self._load_tools_from_file(py_file)
+            tools.extend(file_tools)
+
+        return tools
+
+    def _load_tools_from_file(self, file_path: Path) -> List[Tool]:
+        """
+        Load tools from a single Python file.
+
+        Args:
+            file_path: Path to Python file
+
+        Returns:
+            List of Tool instances
+        """
+        tools = []
+
+        try:
+            # Create module name from file path
+            module_name = f"tactus_plugin_{file_path.stem}_{id(file_path)}"
+
+            # Load module dynamically
+            spec = importlib.util.spec_from_file_location(module_name, file_path)
+            if spec is None or spec.loader is None:
+                logger.error(f"Could not load spec for {file_path}")
+                return tools
+
+            module = importlib.util.module_from_spec(spec)
+
+            # Add to sys.modules so imports work
+            sys.modules[module_name] = module
+
+            # Execute module
+            spec.loader.exec_module(module)
+
+            # Cache the module
+            self.loaded_modules[str(file_path)] = module
+
+            logger.debug(f"Loaded module from {file_path}")
+
+            # Find all public functions in the module
+            for name, obj in inspect.getmembers(module):
+                if self._is_valid_tool_function(name, obj, module):
+                    tool = self._create_tool_from_function(obj, name)
+                    if tool:
+                        tools.append(tool)
+                        logger.info(f"Loaded tool '{name}' from {file_path.name}")
+
+        except Exception as e:
+            logger.error(f"Failed to load tools from {file_path}: {e}", exc_info=True)
+
+        return tools
+
+    def _is_valid_tool_function(self, name: str, obj: Any, module: Any) -> bool:
+        """
+        Check if an object is a valid tool function.
+
+        Args:
+            name: Object name
+            obj: Object to check
+            module: Module the object belongs to
+
+        Returns:
+            True if object is a valid tool function
+        """
+        # Must be a function
+        if not inspect.isfunction(obj):
+            return False
+
+        # Must be public (not start with _)
+        if name.startswith("_"):
+            return False
+
+        # Must be defined in this module (not imported)
+        if obj.__module__ != module.__name__:
+            return False
+
+        return True
+
+    def _create_tool_from_function(self, func: Callable, name: str) -> Optional[Tool]:
+        """
+        Create a Pydantic AI Tool from a Python function.
+
+        Args:
+            func: Python function to wrap
+            name: Tool name
+
+        Returns:
+            Tool instance or None if creation fails
+        """
+        try:
+            # Get function signature and docstring
+            sig = inspect.signature(func)
+            doc = inspect.getdoc(func) or f"Tool: {name}"
+
+            # Check if function is async
+            is_async = inspect.iscoroutinefunction(func)
+
+            # Create wrapper that records tool calls
+            if is_async:
+
+                async def tool_wrapper(*args, **kwargs):
+                    """Async wrapper for tool function."""
+                    try:
+                        result = await func(*args, **kwargs)
+
+                        # Record tool call if tool_primitive is available
+                        if self.tool_primitive:
+                            self.tool_primitive.record_call(name, kwargs, str(result))
+
+                        return result
+                    except Exception as e:
+                        logger.error(f"Tool '{name}' execution failed: {e}", exc_info=True)
+                        error_msg = f"Error executing tool '{name}': {str(e)}"
+
+                        # Record failed call
+                        if self.tool_primitive:
+                            self.tool_primitive.record_call(name, kwargs, error_msg)
+
+                        raise
+
+            else:
+
+                def tool_wrapper(*args, **kwargs):
+                    """Sync wrapper for tool function."""
+                    try:
+                        result = func(*args, **kwargs)
+
+                        # Record tool call if tool_primitive is available
+                        if self.tool_primitive:
+                            self.tool_primitive.record_call(name, kwargs, str(result))
+
+                        return result
+                    except Exception as e:
+                        logger.error(f"Tool '{name}' execution failed: {e}", exc_info=True)
+                        error_msg = f"Error executing tool '{name}': {str(e)}"
+
+                        # Record failed call
+                        if self.tool_primitive:
+                            self.tool_primitive.record_call(name, kwargs, error_msg)
+
+                        raise
+
+            # Copy signature and docstring to wrapper
+            tool_wrapper.__signature__ = sig
+            tool_wrapper.__doc__ = doc
+            tool_wrapper.__name__ = name
+            tool_wrapper.__annotations__ = func.__annotations__
+
+            # Create Pydantic AI Tool
+            tool = Tool(tool_wrapper, name=name, description=doc)
+
+            return tool
+
+        except Exception as e:
+            logger.error(f"Failed to create tool from function '{name}': {e}", exc_info=True)
+            return None
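A minimal usage sketch for PluginLoader (not part of the package diff; the ./plugins directory and weather.py file are hypothetical, assumed to contain public, type-hinted functions such as def current_temperature(city: str) -> str):

from tactus.adapters.plugins import PluginLoader

loader = PluginLoader()

# Preferred API: every public function found in ./plugins/*.py becomes one tool
# in a composable FunctionToolset.
toolset = loader.create_toolset(["./plugins"], name="local")

# Deprecated API: returns individual pydantic_ai.Tool instances instead.
tools = loader.load_from_paths(["./plugins/weather.py"])
print(len(tools))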