tactus 0.31.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tactus/__init__.py +49 -0
- tactus/adapters/__init__.py +9 -0
- tactus/adapters/broker_log.py +76 -0
- tactus/adapters/cli_hitl.py +189 -0
- tactus/adapters/cli_log.py +223 -0
- tactus/adapters/cost_collector_log.py +56 -0
- tactus/adapters/file_storage.py +367 -0
- tactus/adapters/http_callback_log.py +109 -0
- tactus/adapters/ide_log.py +71 -0
- tactus/adapters/lua_tools.py +336 -0
- tactus/adapters/mcp.py +289 -0
- tactus/adapters/mcp_manager.py +196 -0
- tactus/adapters/memory.py +53 -0
- tactus/adapters/plugins.py +419 -0
- tactus/backends/http_backend.py +58 -0
- tactus/backends/model_backend.py +35 -0
- tactus/backends/pytorch_backend.py +110 -0
- tactus/broker/__init__.py +12 -0
- tactus/broker/client.py +247 -0
- tactus/broker/protocol.py +183 -0
- tactus/broker/server.py +1123 -0
- tactus/broker/stdio.py +12 -0
- tactus/cli/__init__.py +7 -0
- tactus/cli/app.py +2245 -0
- tactus/cli/commands/__init__.py +0 -0
- tactus/core/__init__.py +32 -0
- tactus/core/config_manager.py +790 -0
- tactus/core/dependencies/__init__.py +14 -0
- tactus/core/dependencies/registry.py +180 -0
- tactus/core/dsl_stubs.py +2117 -0
- tactus/core/exceptions.py +66 -0
- tactus/core/execution_context.py +480 -0
- tactus/core/lua_sandbox.py +508 -0
- tactus/core/message_history_manager.py +236 -0
- tactus/core/mocking.py +286 -0
- tactus/core/output_validator.py +291 -0
- tactus/core/registry.py +499 -0
- tactus/core/runtime.py +2907 -0
- tactus/core/template_resolver.py +142 -0
- tactus/core/yaml_parser.py +301 -0
- tactus/docker/Dockerfile +61 -0
- tactus/docker/entrypoint.sh +69 -0
- tactus/dspy/__init__.py +39 -0
- tactus/dspy/agent.py +1144 -0
- tactus/dspy/broker_lm.py +181 -0
- tactus/dspy/config.py +212 -0
- tactus/dspy/history.py +196 -0
- tactus/dspy/module.py +405 -0
- tactus/dspy/prediction.py +318 -0
- tactus/dspy/signature.py +185 -0
- tactus/formatting/__init__.py +7 -0
- tactus/formatting/formatter.py +437 -0
- tactus/ide/__init__.py +9 -0
- tactus/ide/coding_assistant.py +343 -0
- tactus/ide/server.py +2223 -0
- tactus/primitives/__init__.py +49 -0
- tactus/primitives/control.py +168 -0
- tactus/primitives/file.py +229 -0
- tactus/primitives/handles.py +378 -0
- tactus/primitives/host.py +94 -0
- tactus/primitives/human.py +342 -0
- tactus/primitives/json.py +189 -0
- tactus/primitives/log.py +187 -0
- tactus/primitives/message_history.py +157 -0
- tactus/primitives/model.py +163 -0
- tactus/primitives/procedure.py +564 -0
- tactus/primitives/procedure_callable.py +318 -0
- tactus/primitives/retry.py +155 -0
- tactus/primitives/session.py +152 -0
- tactus/primitives/state.py +182 -0
- tactus/primitives/step.py +209 -0
- tactus/primitives/system.py +93 -0
- tactus/primitives/tool.py +375 -0
- tactus/primitives/tool_handle.py +279 -0
- tactus/primitives/toolset.py +229 -0
- tactus/protocols/__init__.py +38 -0
- tactus/protocols/chat_recorder.py +81 -0
- tactus/protocols/config.py +97 -0
- tactus/protocols/cost.py +31 -0
- tactus/protocols/hitl.py +71 -0
- tactus/protocols/log_handler.py +27 -0
- tactus/protocols/models.py +355 -0
- tactus/protocols/result.py +33 -0
- tactus/protocols/storage.py +90 -0
- tactus/providers/__init__.py +13 -0
- tactus/providers/base.py +92 -0
- tactus/providers/bedrock.py +117 -0
- tactus/providers/google.py +105 -0
- tactus/providers/openai.py +98 -0
- tactus/sandbox/__init__.py +63 -0
- tactus/sandbox/config.py +171 -0
- tactus/sandbox/container_runner.py +1099 -0
- tactus/sandbox/docker_manager.py +433 -0
- tactus/sandbox/entrypoint.py +227 -0
- tactus/sandbox/protocol.py +213 -0
- tactus/stdlib/__init__.py +10 -0
- tactus/stdlib/io/__init__.py +13 -0
- tactus/stdlib/io/csv.py +88 -0
- tactus/stdlib/io/excel.py +136 -0
- tactus/stdlib/io/file.py +90 -0
- tactus/stdlib/io/fs.py +154 -0
- tactus/stdlib/io/hdf5.py +121 -0
- tactus/stdlib/io/json.py +109 -0
- tactus/stdlib/io/parquet.py +83 -0
- tactus/stdlib/io/tsv.py +88 -0
- tactus/stdlib/loader.py +274 -0
- tactus/stdlib/tac/tactus/tools/done.tac +33 -0
- tactus/stdlib/tac/tactus/tools/log.tac +50 -0
- tactus/testing/README.md +273 -0
- tactus/testing/__init__.py +61 -0
- tactus/testing/behave_integration.py +380 -0
- tactus/testing/context.py +486 -0
- tactus/testing/eval_models.py +114 -0
- tactus/testing/evaluation_runner.py +222 -0
- tactus/testing/evaluators.py +634 -0
- tactus/testing/events.py +94 -0
- tactus/testing/gherkin_parser.py +134 -0
- tactus/testing/mock_agent.py +315 -0
- tactus/testing/mock_dependencies.py +234 -0
- tactus/testing/mock_hitl.py +171 -0
- tactus/testing/mock_registry.py +168 -0
- tactus/testing/mock_tools.py +133 -0
- tactus/testing/models.py +115 -0
- tactus/testing/pydantic_eval_runner.py +508 -0
- tactus/testing/steps/__init__.py +13 -0
- tactus/testing/steps/builtin.py +902 -0
- tactus/testing/steps/custom.py +69 -0
- tactus/testing/steps/registry.py +68 -0
- tactus/testing/test_runner.py +489 -0
- tactus/tracing/__init__.py +5 -0
- tactus/tracing/trace_manager.py +417 -0
- tactus/utils/__init__.py +1 -0
- tactus/utils/cost_calculator.py +72 -0
- tactus/utils/model_pricing.py +132 -0
- tactus/utils/safe_file_library.py +502 -0
- tactus/utils/safe_libraries.py +234 -0
- tactus/validation/LuaLexerBase.py +66 -0
- tactus/validation/LuaParserBase.py +23 -0
- tactus/validation/README.md +224 -0
- tactus/validation/__init__.py +7 -0
- tactus/validation/error_listener.py +21 -0
- tactus/validation/generated/LuaLexer.interp +231 -0
- tactus/validation/generated/LuaLexer.py +5548 -0
- tactus/validation/generated/LuaLexer.tokens +124 -0
- tactus/validation/generated/LuaLexerBase.py +66 -0
- tactus/validation/generated/LuaParser.interp +173 -0
- tactus/validation/generated/LuaParser.py +6439 -0
- tactus/validation/generated/LuaParser.tokens +124 -0
- tactus/validation/generated/LuaParserBase.py +23 -0
- tactus/validation/generated/LuaParserVisitor.py +118 -0
- tactus/validation/generated/__init__.py +7 -0
- tactus/validation/grammar/LuaLexer.g4 +123 -0
- tactus/validation/grammar/LuaParser.g4 +178 -0
- tactus/validation/semantic_visitor.py +817 -0
- tactus/validation/validator.py +157 -0
- tactus-0.31.0.dist-info/METADATA +1809 -0
- tactus-0.31.0.dist-info/RECORD +160 -0
- tactus-0.31.0.dist-info/WHEEL +4 -0
- tactus-0.31.0.dist-info/entry_points.txt +2 -0
- tactus-0.31.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,336 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Lua Tools Adapter - Convert DSL-defined Lua functions to Pydantic AI tools.
|
|
3
|
+
|
|
4
|
+
Supports:
|
|
5
|
+
- Individual tool() declarations
|
|
6
|
+
- toolset() with type="lua"
|
|
7
|
+
- Inline agent tools with lambda functions
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
import logging
|
|
11
|
+
from inspect import Parameter, Signature
|
|
12
|
+
from typing import Any, Dict, List, Optional, Callable
|
|
13
|
+
from pydantic_ai.toolsets import FunctionToolset
|
|
14
|
+
from pydantic import BaseModel, Field, create_model
|
|
15
|
+
|
|
16
|
+
logger = logging.getLogger(__name__)
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class LuaToolsAdapter:
    """Adapter to create Pydantic AI toolsets from Lua function definitions.

    Supports:
    - Individual tool() declarations
    - toolset() with type="lua"
    - Inline agent tools with lambda functions
    """

    def __init__(self, tool_primitive: Optional[Any] = None, mock_manager: Optional[Any] = None):
        """
        Initialize adapter.

        Args:
            tool_primitive: Optional ToolPrimitive for call tracking
            mock_manager: Optional MockManager for mock responses
        """
        self.tool_primitive = tool_primitive
        self.mock_manager = mock_manager

    def create_single_tool_toolset(
        self, tool_name: str, tool_spec: Dict[str, Any]
    ) -> FunctionToolset:
        """
        Create a FunctionToolset from a single tool() declaration.

        Used for: tool("name", {...}, function)

        Args:
            tool_name: Name of the tool
            tool_spec: Dict with description, parameters, handler

        Returns:
            FunctionToolset with single tool
        """
        wrapped_fn = self._create_wrapped_function(tool_name, tool_spec)
        logger.info(f"Created single-tool toolset '{tool_name}'")
        return FunctionToolset(tools=[wrapped_fn])

    def _wrap_tool_specs(
        self,
        tools_list: List[Dict[str, Any]],
        missing_name_msg: str,
        prefix: Optional[str] = None,
    ) -> List[Callable]:
        """
        Wrap a list of tool specs into async callables, skipping unnamed entries.

        Shared by create_lua_toolset / create_inline_tools_toolset /
        create_inline_toolset, which previously duplicated this loop.

        Args:
            tools_list: List of tool spec dicts (each should carry a 'name')
            missing_name_msg: Error message logged when a spec has no name
            prefix: Optional namespace joined with '_' before the tool name

        Returns:
            List of wrapped async functions suitable for FunctionToolset
        """
        wrapped_functions: List[Callable] = []
        for tool_spec in tools_list:
            tool_name = tool_spec.get("name")
            if not tool_name:
                logger.error(missing_name_msg)
                continue
            full_name = f"{prefix}_{tool_name}" if prefix else tool_name
            wrapped_functions.append(self._create_wrapped_function(full_name, tool_spec))
        return wrapped_functions

    def create_lua_toolset(
        self, toolset_name: str, toolset_config: Dict[str, Any]
    ) -> FunctionToolset:
        """
        Create a FunctionToolset from toolset() with type="lua".

        Used for: toolset("name", {type="lua", tools={...}})

        Args:
            toolset_name: Name of the toolset
            toolset_config: Dict with type="lua" and tools list

        Returns:
            FunctionToolset with all tools
        """
        tools_list = toolset_config.get("tools", [])
        if not tools_list:
            logger.warning(f"Lua toolset '{toolset_name}' has no tools")
            return FunctionToolset(tools=[])

        wrapped_functions = self._wrap_tool_specs(
            tools_list, f"Tool in toolset '{toolset_name}' missing name"
        )
        logger.info(f"Created Lua toolset '{toolset_name}' with {len(wrapped_functions)} tools")
        return FunctionToolset(tools=wrapped_functions)

    def create_inline_tools_toolset(
        self, agent_name: str, tools_list: List[Dict[str, Any]]
    ) -> FunctionToolset:
        """
        Create a FunctionToolset from inline agent tools.

        Used for: agent("name", {tools = {{...}}})

        Args:
            agent_name: Name of the agent
            tools_list: List of inline tool specs

        Returns:
            FunctionToolset with inline tools
        """
        # Prefix tool names with the agent name for uniqueness across agents.
        wrapped_functions = self._wrap_tool_specs(
            tools_list,
            f"Inline tool for agent '{agent_name}' missing name",
            prefix=agent_name,
        )
        logger.info(
            f"Created inline tools for agent '{agent_name}': {len(wrapped_functions)} tools"
        )
        return FunctionToolset(tools=wrapped_functions)

    def create_inline_toolset(
        self, toolset_name: str, tools_list: List[Dict[str, Any]]
    ) -> FunctionToolset:
        """
        Create a FunctionToolset from inline toolset tools.

        Used for: Toolset "name" { tools = {{...}} }

        Args:
            toolset_name: Name of the toolset
            tools_list: List of inline tool specs

        Returns:
            FunctionToolset with inline tools
        """
        # Prefix tool names with the toolset name for uniqueness across toolsets.
        wrapped_functions = self._wrap_tool_specs(
            tools_list,
            f"Inline tool for toolset '{toolset_name}' missing name",
            prefix=toolset_name,
        )
        logger.info(f"Created inline toolset '{toolset_name}': {len(wrapped_functions)} tools")
        return FunctionToolset(tools=wrapped_functions)

    def _create_wrapped_function(self, tool_name: str, tool_spec: Dict[str, Any]) -> Callable:
        """
        Create a Python async function that wraps a Lua handler.

        Args:
            tool_name: Tool name for logging/tracking
            tool_spec: Dict with description, parameters, handler

        Returns:
            Async Python function suitable for FunctionToolset

        Raises:
            ValueError: If the spec contains no callable handler.
        """
        lua_handler = tool_spec.get("handler")
        if lua_handler is None:
            # Tool/Toolset DSL blocks can specify the handler as an unnamed function value.
            # `lua_table_to_dict()` preserves that as numeric key `1` for mixed tables.
            try:
                candidate = tool_spec.get(1)
            except Exception:
                candidate = None
            if candidate is not None and callable(candidate):
                lua_handler = candidate
        description = tool_spec.get("description", f"Tool: {tool_name}")
        # Only support 'input' field name (new DSL syntax only)
        input_schema = tool_spec.get("input", {})

        # Debug what we received
        logger.debug(f"Tool '{tool_name}' spec keys: {list(tool_spec.keys())}")
        logger.debug(f"Tool '{tool_name}' full spec: {tool_spec}")

        if not lua_handler:
            raise ValueError(f"Tool '{tool_name}' missing handler function")

        # Create Pydantic model for input (used below to derive __annotations__)
        param_model = self._create_parameter_model(tool_name, input_schema)

        # Create async wrapper function
        async def wrapped_tool(**kwargs) -> str:
            """Tool function that calls Lua handler."""
            try:
                # Check for mock response first
                if self.mock_manager:
                    mock_result = self.mock_manager.get_mock_response(tool_name, kwargs)
                    if mock_result is not None:
                        logger.debug(f"Using mock response for '{tool_name}': {mock_result}")
                        # Convert mock result to string to match tool return type
                        # (mock_result is known non-None here).
                        result_str = str(mock_result)
                        # Track the mock call
                        if self.tool_primitive:
                            self.tool_primitive.record_call(tool_name, kwargs, result_str)
                        self.mock_manager.record_call(tool_name, kwargs, result_str)
                        return result_str

                # Call Lua function directly (Lupa is NOT thread-safe, so we can't use executor)
                # Lua handlers should be fast and don't do I/O, so this won't block significantly

                # Debug: Log what we're passing
                logger.debug(f"Calling Lua tool '{tool_name}' with kwargs: {kwargs}")

                # Tool functions expect parameters as a single 'args' table
                # Pass kwargs directly - Lupa automatically converts Python dicts to Lua tables
                result = lua_handler(kwargs)

                # Convert result to string
                result_str = str(result) if result is not None else ""

                # Record tool call
                if self.tool_primitive:
                    self.tool_primitive.record_call(tool_name, kwargs, result_str)

                # Also track in mock manager for assertions
                if self.mock_manager:
                    self.mock_manager.record_call(tool_name, kwargs, result_str)

                logger.debug(f"Lua tool '{tool_name}' executed successfully")
                return result_str

            except Exception as e:
                error_msg = f"Error executing Lua tool '{tool_name}': {str(e)}"
                logger.error(error_msg, exc_info=True)

                # Record failed call
                if self.tool_primitive:
                    self.tool_primitive.record_call(tool_name, kwargs, error_msg)

                # Re-raise to let agent handle it
                raise RuntimeError(error_msg) from e

        # Build proper signature for Pydantic AI tool discovery
        sig_params = []
        logger.debug(f"Building signature for tool '{tool_name}' with schema: {input_schema}")

        # Lua table iteration order is undefined, so ensure signature is always valid:
        # required params (no defaults) must come before optional params (with defaults).
        required_param_names: list[str] = []
        optional_param_names: list[str] = []
        for param_name in sorted(input_schema.keys()):
            param_spec = input_schema.get(param_name, {}) or {}
            if param_spec.get("required", True):
                required_param_names.append(param_name)
            else:
                optional_param_names.append(param_name)

        for param_name in required_param_names + optional_param_names:
            param_spec = input_schema.get(param_name, {}) or {}
            param_type = self._map_lua_type(param_spec.get("type", "string"))
            required = param_spec.get("required", True)

            if required:
                param = Parameter(
                    param_name, Parameter.POSITIONAL_OR_KEYWORD, annotation=param_type
                )
            else:
                default = param_spec.get("default")
                param = Parameter(
                    param_name,
                    Parameter.POSITIONAL_OR_KEYWORD,
                    default=default,
                    annotation=Optional[param_type],
                )
            sig_params.append(param)

        # Set function metadata for Pydantic AI
        wrapped_tool.__name__ = tool_name
        wrapped_tool.__doc__ = description
        wrapped_tool.__signature__ = Signature(sig_params, return_annotation=str)
        wrapped_tool.__annotations__ = self._build_annotations(param_model)

        return wrapped_tool

    def _create_parameter_model(
        self, tool_name: str, parameters: Dict[str, Dict[str, Any]]
    ) -> type[BaseModel]:
        """
        Create a Pydantic model from Lua parameter specifications.

        Args:
            tool_name: Tool name for model naming
            parameters: Dict of param_name -> {type, description, required}

        Returns:
            Dynamically created Pydantic model class
        """
        if not parameters:
            # No parameters - return empty model
            return create_model(f"{tool_name}Params")

        fields = {}
        for param_name, param_spec in parameters.items():
            param_type_str = param_spec.get("type", "string")
            description = param_spec.get("description", "")
            required = param_spec.get("required", True)

            # Map Lua type strings to Python types
            python_type = self._map_lua_type(param_type_str)

            # Create field
            if required:
                fields[param_name] = (python_type, Field(..., description=description))
            else:
                default = param_spec.get("default")
                fields[param_name] = (
                    Optional[python_type],
                    Field(default=default, description=description),
                )

        return create_model(f"{tool_name}Params", **fields)

    def _map_lua_type(self, lua_type: str) -> type:
        """Map Lua type string to Python type (unknown types fall back to str)."""
        type_map = {
            "string": str,
            "number": float,
            "integer": int,
            "boolean": bool,
            "table": dict,
            "array": list,
        }
        return type_map.get(lua_type.lower(), str)

    def _build_annotations(self, param_model: type[BaseModel]) -> Dict[str, type]:
        """Build __annotations__ dict from Pydantic model."""
        if not param_model.model_fields:
            return {"return": str}

        annotations = {
            field_name: field_info.annotation
            for field_name, field_info in param_model.model_fields.items()
        }
        annotations["return"] = str
        return annotations
tactus/adapters/mcp.py
ADDED
|
@@ -0,0 +1,289 @@
|
|
|
1
|
+
"""
|
|
2
|
+
MCP (Model Context Protocol) adapter for Tactus.
|
|
3
|
+
|
|
4
|
+
Provides integration with MCP servers to load and convert tools for use with Pydantic AI agents.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import logging
|
|
8
|
+
from typing import List, Any, Optional, Dict
|
|
9
|
+
from pydantic import create_model, Field
|
|
10
|
+
from pydantic_ai import Tool
|
|
11
|
+
|
|
12
|
+
logger = logging.getLogger(__name__)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class PydanticAIMCPAdapter:
    """
    Adapter for converting MCP tools to Pydantic AI format.

    Converts MCP tool definitions (with JSON Schema inputSchema) into
    Pydantic AI Tool instances with dynamically generated Pydantic models.
    """

    def __init__(self, mcp_client: Any, tool_primitive: Optional[Any] = None):
        """
        Initialize MCP adapter.

        Args:
            mcp_client: MCP client instance (from fastmcp or similar)
            tool_primitive: Optional ToolPrimitive for recording tool calls
        """
        self.mcp_client = mcp_client
        self.tool_primitive = tool_primitive
        logger.debug("PydanticAIMCPAdapter initialized")

    async def load_tools(self) -> List[Tool]:
        """
        Load tools from MCP server and convert to Pydantic AI Tools.

        Returns:
            List of pydantic_ai.Tool instances (empty on any listing failure)

        Note: Assumes MCP client has a method to list tools (e.g., list_tools())
        """
        try:
            # Query MCP server for available tools
            # Common MCP client interface: list_tools() or get_tools()
            if hasattr(self.mcp_client, "list_tools"):
                mcp_tools = await self.mcp_client.list_tools()
            elif hasattr(self.mcp_client, "get_tools"):
                mcp_tools = await self.mcp_client.get_tools()
            else:
                # Try calling as a method that returns tools
                logger.warning(
                    "MCP client doesn't have list_tools() or get_tools(), trying direct call"
                )
                mcp_tools = await self.mcp_client() if callable(self.mcp_client) else []
        except Exception as e:
            logger.error(f"Failed to load tools from MCP server: {e}", exc_info=True)
            return []

        if not mcp_tools:
            logger.warning("No tools found from MCP server")
            return []

        logger.info(f"Found {len(mcp_tools)} tools from MCP server")

        # Convert each MCP tool to Pydantic AI Tool; a failure on one tool
        # is logged and skipped so the remaining tools still load.
        pydantic_tools = []
        for mcp_tool in mcp_tools:
            try:
                tool = self._convert_mcp_tool_to_pydantic_ai(mcp_tool)
                if tool:
                    pydantic_tools.append(tool)
            except Exception as e:
                logger.error(
                    f"Failed to convert MCP tool {getattr(mcp_tool, 'name', 'unknown')}: {e}",
                    exc_info=True,
                )

        logger.info(f"Converted {len(pydantic_tools)} tools to Pydantic AI format")
        return pydantic_tools

    def _convert_mcp_tool_to_pydantic_ai(self, mcp_tool: Any) -> Optional[Tool]:
        """
        Convert a single MCP tool to Pydantic AI Tool.

        Args:
            mcp_tool: MCP tool definition (should have name, description, inputSchema)

        Returns:
            pydantic_ai.Tool instance or None if conversion fails
        """
        # Extract tool metadata.
        # BUG FIX: the previous conditional-expression form
        #   getattr(mcp_tool, "name", None) or mcp_tool.get("name")
        #       if isinstance(mcp_tool, dict) else None
        # grouped as `(getattr(...) or mcp_tool.get(...)) if isinstance(...) else None`,
        # so attribute-style tool objects (the common MCP case) always produced
        # name=None / description="" and were silently dropped.
        if isinstance(mcp_tool, dict):
            tool_name = mcp_tool.get("name")
            tool_description = mcp_tool.get("description", "") or ""
        else:
            tool_name = getattr(mcp_tool, "name", None)
            tool_description = getattr(mcp_tool, "description", None) or ""

        if not tool_name:
            logger.warning(f"MCP tool missing name: {mcp_tool}")
            return None

        # Extract inputSchema (JSON Schema)
        input_schema = None
        if hasattr(mcp_tool, "inputSchema"):
            input_schema = mcp_tool.inputSchema
        elif isinstance(mcp_tool, dict) and "inputSchema" in mcp_tool:
            input_schema = mcp_tool["inputSchema"]
        elif hasattr(mcp_tool, "parameters"):
            # Some MCP implementations use 'parameters' instead of 'inputSchema'
            input_schema = mcp_tool.parameters

        # Create Pydantic model from JSON Schema
        if input_schema:
            try:
                args_model = self._json_schema_to_pydantic_model(input_schema, tool_name)
            except Exception as e:
                logger.error(
                    f"Failed to create Pydantic model for tool '{tool_name}': {e}", exc_info=True
                )
                # Fallback: create a simple model that accepts any dict
                args_model = create_model(
                    f"{tool_name}Args", **{"args": (Dict[str, Any], Field(default={}))}
                )
        else:
            # No schema - create empty model
            args_model = create_model(f"{tool_name}Args")

        # Create wrapper function that executes the MCP tool
        async def tool_wrapper(args: args_model) -> str:
            """
            Wrapper function that executes the MCP tool call.

            Args:
                args: Validated arguments from Pydantic model

            Returns:
                Tool result as string
            """
            # Convert Pydantic model to dict for MCP call
            if hasattr(args, "model_dump"):
                args_dict = args.model_dump()
            elif hasattr(args, "dict"):
                args_dict = args.dict()
            else:
                args_dict = dict(args) if hasattr(args, "__dict__") else {}

            logger.info(f"Executing MCP tool '{tool_name}' with args: {args_dict}")

            try:
                # Call MCP tool - common interface: call_tool(name, args) or tool.execute(args)
                if hasattr(self.mcp_client, "call_tool"):
                    result = await self.mcp_client.call_tool(tool_name, args_dict)
                elif hasattr(self.mcp_client, "call"):
                    result = await self.mcp_client.call(tool_name, args_dict)
                elif hasattr(mcp_tool, "execute"):
                    result = await mcp_tool.execute(args_dict)
                else:
                    # Try calling as a method
                    if callable(mcp_tool):
                        result = await mcp_tool(**args_dict)
                    else:
                        raise ValueError(
                            f"Cannot execute MCP tool '{tool_name}': no callable interface found"
                        )

                # Convert result to string
                if isinstance(result, dict):
                    # MCP tools often return dict with 'content' or 'text' field
                    result_str = result.get("content") or result.get("text") or str(result)
                elif isinstance(result, list):
                    result_str = str(result)
                else:
                    result_str = str(result)

                # Record tool call if tool_primitive is available
                if self.tool_primitive:
                    self.tool_primitive.record_call(tool_name, args_dict, result_str)

                logger.debug(f"Tool '{tool_name}' returned: {result_str[:100]}...")
                return result_str

            except Exception as e:
                logger.error(f"MCP tool '{tool_name}' execution failed: {e}", exc_info=True)
                error_msg = f"Error executing tool '{tool_name}': {str(e)}"
                # Still record the failed call
                if self.tool_primitive:
                    self.tool_primitive.record_call(tool_name, args_dict, error_msg)
                raise

        # Create Pydantic AI Tool
        tool = Tool(
            tool_wrapper, name=tool_name, description=tool_description or f"Tool: {tool_name}"
        )

        return tool

    def _json_schema_to_pydantic_model(
        self, schema: Dict[str, Any], base_name: str = "Model"
    ) -> type:
        """
        Convert JSON Schema to a Pydantic model.

        Args:
            schema: JSON Schema dictionary
            base_name: Base name for the generated model

        Returns:
            Pydantic model class
        """
        # Type mapping from JSON Schema to Python types
        type_mapping = {
            "string": str,
            "integer": int,
            "number": float,
            "boolean": bool,
            "array": list,
            "object": dict,
            "null": type(None),
        }

        # Handle schema type
        schema_type = schema.get("type", "object")
        if schema_type != "object":
            # For non-object schemas, create a simple wrapper
            python_type = type_mapping.get(schema_type, Any)
            return create_model(f"{base_name}Args", value=(python_type, Field(...)))

        # Extract properties
        properties = schema.get("properties", {})
        required_fields = schema.get("required", [])

        # Build fields dict for create_model
        fields = {}
        for field_name, field_schema in properties.items():
            field_type_info = field_schema.get("type", "string")
            python_type = type_mapping.get(field_type_info, Any)

            # Check if field is required
            is_required = field_name in required_fields

            # Handle default values (Ellipsis marks a required field for pydantic)
            default_value = field_schema.get("default", ...)
            if not is_required and default_value is ...:
                default_value = None

            # Create Field with description if available
            field_description = field_schema.get("description", "")
            if field_description:
                fields[field_name] = (
                    python_type,
                    Field(default=default_value, description=field_description),
                )
            else:
                fields[field_name] = (python_type, Field(default=default_value))

        # Create the model (schema 'title' wins over the derived name)
        model_name = schema.get("title", f"{base_name}Args")
        return create_model(model_name, **fields)
|
|
266
|
+
|
|
267
|
+
|
|
268
|
+
def convert_mcp_tools_to_pydantic_ai(
    mcp_tools: List[Any], tool_primitive: Optional[Any] = None
) -> List[Tool]:
    """
    Convert MCP tools to Pydantic AI Tool format.

    Deprecated shim retained for backward compatibility: it only logs a
    deprecation warning and returns an empty list. Full MCP integration
    requires an MCP client, so use PydanticAIMCPAdapter.load_tools() instead.

    Args:
        mcp_tools: List of MCP tool objects (ignored)
        tool_primitive: Optional ToolPrimitive for recording calls (ignored)

    Returns:
        Empty list of pydantic_ai.Tool objects
    """
    logger.warning(
        "convert_mcp_tools_to_pydantic_ai() is deprecated - use PydanticAIMCPAdapter instead"
    )
    return list()
|