tactus 0.31.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tactus/__init__.py +49 -0
- tactus/adapters/__init__.py +9 -0
- tactus/adapters/broker_log.py +76 -0
- tactus/adapters/cli_hitl.py +189 -0
- tactus/adapters/cli_log.py +223 -0
- tactus/adapters/cost_collector_log.py +56 -0
- tactus/adapters/file_storage.py +367 -0
- tactus/adapters/http_callback_log.py +109 -0
- tactus/adapters/ide_log.py +71 -0
- tactus/adapters/lua_tools.py +336 -0
- tactus/adapters/mcp.py +289 -0
- tactus/adapters/mcp_manager.py +196 -0
- tactus/adapters/memory.py +53 -0
- tactus/adapters/plugins.py +419 -0
- tactus/backends/http_backend.py +58 -0
- tactus/backends/model_backend.py +35 -0
- tactus/backends/pytorch_backend.py +110 -0
- tactus/broker/__init__.py +12 -0
- tactus/broker/client.py +247 -0
- tactus/broker/protocol.py +183 -0
- tactus/broker/server.py +1123 -0
- tactus/broker/stdio.py +12 -0
- tactus/cli/__init__.py +7 -0
- tactus/cli/app.py +2245 -0
- tactus/cli/commands/__init__.py +0 -0
- tactus/core/__init__.py +32 -0
- tactus/core/config_manager.py +790 -0
- tactus/core/dependencies/__init__.py +14 -0
- tactus/core/dependencies/registry.py +180 -0
- tactus/core/dsl_stubs.py +2117 -0
- tactus/core/exceptions.py +66 -0
- tactus/core/execution_context.py +480 -0
- tactus/core/lua_sandbox.py +508 -0
- tactus/core/message_history_manager.py +236 -0
- tactus/core/mocking.py +286 -0
- tactus/core/output_validator.py +291 -0
- tactus/core/registry.py +499 -0
- tactus/core/runtime.py +2907 -0
- tactus/core/template_resolver.py +142 -0
- tactus/core/yaml_parser.py +301 -0
- tactus/docker/Dockerfile +61 -0
- tactus/docker/entrypoint.sh +69 -0
- tactus/dspy/__init__.py +39 -0
- tactus/dspy/agent.py +1144 -0
- tactus/dspy/broker_lm.py +181 -0
- tactus/dspy/config.py +212 -0
- tactus/dspy/history.py +196 -0
- tactus/dspy/module.py +405 -0
- tactus/dspy/prediction.py +318 -0
- tactus/dspy/signature.py +185 -0
- tactus/formatting/__init__.py +7 -0
- tactus/formatting/formatter.py +437 -0
- tactus/ide/__init__.py +9 -0
- tactus/ide/coding_assistant.py +343 -0
- tactus/ide/server.py +2223 -0
- tactus/primitives/__init__.py +49 -0
- tactus/primitives/control.py +168 -0
- tactus/primitives/file.py +229 -0
- tactus/primitives/handles.py +378 -0
- tactus/primitives/host.py +94 -0
- tactus/primitives/human.py +342 -0
- tactus/primitives/json.py +189 -0
- tactus/primitives/log.py +187 -0
- tactus/primitives/message_history.py +157 -0
- tactus/primitives/model.py +163 -0
- tactus/primitives/procedure.py +564 -0
- tactus/primitives/procedure_callable.py +318 -0
- tactus/primitives/retry.py +155 -0
- tactus/primitives/session.py +152 -0
- tactus/primitives/state.py +182 -0
- tactus/primitives/step.py +209 -0
- tactus/primitives/system.py +93 -0
- tactus/primitives/tool.py +375 -0
- tactus/primitives/tool_handle.py +279 -0
- tactus/primitives/toolset.py +229 -0
- tactus/protocols/__init__.py +38 -0
- tactus/protocols/chat_recorder.py +81 -0
- tactus/protocols/config.py +97 -0
- tactus/protocols/cost.py +31 -0
- tactus/protocols/hitl.py +71 -0
- tactus/protocols/log_handler.py +27 -0
- tactus/protocols/models.py +355 -0
- tactus/protocols/result.py +33 -0
- tactus/protocols/storage.py +90 -0
- tactus/providers/__init__.py +13 -0
- tactus/providers/base.py +92 -0
- tactus/providers/bedrock.py +117 -0
- tactus/providers/google.py +105 -0
- tactus/providers/openai.py +98 -0
- tactus/sandbox/__init__.py +63 -0
- tactus/sandbox/config.py +171 -0
- tactus/sandbox/container_runner.py +1099 -0
- tactus/sandbox/docker_manager.py +433 -0
- tactus/sandbox/entrypoint.py +227 -0
- tactus/sandbox/protocol.py +213 -0
- tactus/stdlib/__init__.py +10 -0
- tactus/stdlib/io/__init__.py +13 -0
- tactus/stdlib/io/csv.py +88 -0
- tactus/stdlib/io/excel.py +136 -0
- tactus/stdlib/io/file.py +90 -0
- tactus/stdlib/io/fs.py +154 -0
- tactus/stdlib/io/hdf5.py +121 -0
- tactus/stdlib/io/json.py +109 -0
- tactus/stdlib/io/parquet.py +83 -0
- tactus/stdlib/io/tsv.py +88 -0
- tactus/stdlib/loader.py +274 -0
- tactus/stdlib/tac/tactus/tools/done.tac +33 -0
- tactus/stdlib/tac/tactus/tools/log.tac +50 -0
- tactus/testing/README.md +273 -0
- tactus/testing/__init__.py +61 -0
- tactus/testing/behave_integration.py +380 -0
- tactus/testing/context.py +486 -0
- tactus/testing/eval_models.py +114 -0
- tactus/testing/evaluation_runner.py +222 -0
- tactus/testing/evaluators.py +634 -0
- tactus/testing/events.py +94 -0
- tactus/testing/gherkin_parser.py +134 -0
- tactus/testing/mock_agent.py +315 -0
- tactus/testing/mock_dependencies.py +234 -0
- tactus/testing/mock_hitl.py +171 -0
- tactus/testing/mock_registry.py +168 -0
- tactus/testing/mock_tools.py +133 -0
- tactus/testing/models.py +115 -0
- tactus/testing/pydantic_eval_runner.py +508 -0
- tactus/testing/steps/__init__.py +13 -0
- tactus/testing/steps/builtin.py +902 -0
- tactus/testing/steps/custom.py +69 -0
- tactus/testing/steps/registry.py +68 -0
- tactus/testing/test_runner.py +489 -0
- tactus/tracing/__init__.py +5 -0
- tactus/tracing/trace_manager.py +417 -0
- tactus/utils/__init__.py +1 -0
- tactus/utils/cost_calculator.py +72 -0
- tactus/utils/model_pricing.py +132 -0
- tactus/utils/safe_file_library.py +502 -0
- tactus/utils/safe_libraries.py +234 -0
- tactus/validation/LuaLexerBase.py +66 -0
- tactus/validation/LuaParserBase.py +23 -0
- tactus/validation/README.md +224 -0
- tactus/validation/__init__.py +7 -0
- tactus/validation/error_listener.py +21 -0
- tactus/validation/generated/LuaLexer.interp +231 -0
- tactus/validation/generated/LuaLexer.py +5548 -0
- tactus/validation/generated/LuaLexer.tokens +124 -0
- tactus/validation/generated/LuaLexerBase.py +66 -0
- tactus/validation/generated/LuaParser.interp +173 -0
- tactus/validation/generated/LuaParser.py +6439 -0
- tactus/validation/generated/LuaParser.tokens +124 -0
- tactus/validation/generated/LuaParserBase.py +23 -0
- tactus/validation/generated/LuaParserVisitor.py +118 -0
- tactus/validation/generated/__init__.py +7 -0
- tactus/validation/grammar/LuaLexer.g4 +123 -0
- tactus/validation/grammar/LuaParser.g4 +178 -0
- tactus/validation/semantic_visitor.py +817 -0
- tactus/validation/validator.py +157 -0
- tactus-0.31.2.dist-info/METADATA +1809 -0
- tactus-0.31.2.dist-info/RECORD +160 -0
- tactus-0.31.2.dist-info/WHEEL +4 -0
- tactus-0.31.2.dist-info/entry_points.txt +2 -0
- tactus-0.31.2.dist-info/licenses/LICENSE +21 -0
tactus/core/runtime.py
ADDED
|
@@ -0,0 +1,2907 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Tactus Runtime - Main execution engine for Lua-based workflows.
|
|
3
|
+
|
|
4
|
+
Orchestrates:
|
|
5
|
+
1. Lua DSL parsing and validation (via registry)
|
|
6
|
+
2. Lua sandbox setup
|
|
7
|
+
3. Primitive injection
|
|
8
|
+
4. Agent configuration with LLMs and tools (optional)
|
|
9
|
+
5. Workflow execution
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import io
|
|
13
|
+
import logging
|
|
14
|
+
import time
|
|
15
|
+
import uuid
|
|
16
|
+
from typing import Dict, Any, Optional
|
|
17
|
+
|
|
18
|
+
from tactus.core.registry import ProcedureRegistry, RegistryBuilder
|
|
19
|
+
from tactus.core.dsl_stubs import create_dsl_stubs, lua_table_to_dict
|
|
20
|
+
from tactus.core.template_resolver import TemplateResolver
|
|
21
|
+
from tactus.core.message_history_manager import MessageHistoryManager
|
|
22
|
+
from tactus.core.lua_sandbox import LuaSandbox, LuaSandboxError
|
|
23
|
+
from tactus.core.output_validator import OutputValidator, OutputValidationError
|
|
24
|
+
from tactus.core.execution_context import BaseExecutionContext
|
|
25
|
+
from tactus.core.exceptions import ProcedureWaitingForHuman, TactusRuntimeError
|
|
26
|
+
from tactus.protocols.storage import StorageBackend
|
|
27
|
+
from tactus.protocols.hitl import HITLHandler
|
|
28
|
+
from tactus.protocols.chat_recorder import ChatRecorder
|
|
29
|
+
|
|
30
|
+
# For backwards compatibility with YAML
|
|
31
|
+
try:
|
|
32
|
+
from tactus.core.yaml_parser import ProcedureYAMLParser, ProcedureConfigError
|
|
33
|
+
except ImportError:
|
|
34
|
+
ProcedureYAMLParser = None
|
|
35
|
+
ProcedureConfigError = TactusRuntimeError
|
|
36
|
+
|
|
37
|
+
# Import primitives
|
|
38
|
+
from tactus.primitives.state import StatePrimitive
|
|
39
|
+
from tactus.primitives.control import IterationsPrimitive, StopPrimitive
|
|
40
|
+
from tactus.primitives.tool import ToolPrimitive
|
|
41
|
+
from tactus.primitives.human import HumanPrimitive
|
|
42
|
+
from tactus.primitives.step import StepPrimitive, CheckpointPrimitive
|
|
43
|
+
from tactus.primitives.log import LogPrimitive
|
|
44
|
+
from tactus.primitives.message_history import MessageHistoryPrimitive
|
|
45
|
+
from tactus.primitives.json import JsonPrimitive
|
|
46
|
+
from tactus.primitives.retry import RetryPrimitive
|
|
47
|
+
from tactus.primitives.file import FilePrimitive
|
|
48
|
+
from tactus.primitives.procedure import ProcedurePrimitive
|
|
49
|
+
from tactus.primitives.system import SystemPrimitive
|
|
50
|
+
from tactus.primitives.host import HostPrimitive
|
|
51
|
+
|
|
52
|
+
logger = logging.getLogger(__name__)
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
class TactusRuntime:
|
|
56
|
+
"""
|
|
57
|
+
Main execution engine for Lua-based workflows.
|
|
58
|
+
|
|
59
|
+
Responsibilities:
|
|
60
|
+
- Parse and validate YAML configuration
|
|
61
|
+
- Setup sandboxed Lua environment
|
|
62
|
+
- Create and inject primitives
|
|
63
|
+
- Configure agents with LLMs and tools (if available)
|
|
64
|
+
- Execute Lua workflow code
|
|
65
|
+
- Return results
|
|
66
|
+
"""
|
|
67
|
+
|
|
68
|
+
def __init__(
    self,
    procedure_id: str,
    storage_backend: Optional[StorageBackend] = None,
    hitl_handler: Optional[HITLHandler] = None,
    chat_recorder: Optional[ChatRecorder] = None,
    mcp_server: Optional[Any] = None,
    mcp_servers: Optional[Dict[str, Any]] = None,
    openai_api_key: Optional[str] = None,
    log_handler: Optional[Any] = None,
    tool_primitive: Optional[ToolPrimitive] = None,
    recursion_depth: int = 0,
    tool_paths: Optional[list] = None,
    external_config: Optional[Dict[str, Any]] = None,
    run_id: Optional[str] = None,
    source_file_path: Optional[str] = None,
):
    """
    Initialize the Tactus runtime.

    Stores the injected collaborators and pre-declares every attribute that
    later setup steps (sandbox creation, primitive initialization, agent
    setup) will populate, so the object's shape is fixed up front.

    Args:
        procedure_id: Unique procedure identifier
        storage_backend: Storage backend for checkpoints and state
        hitl_handler: Handler for human-in-the-loop interactions
        chat_recorder: Optional chat recorder for conversation logging
        mcp_server: DEPRECATED - use mcp_servers instead
        mcp_servers: Optional dict of MCP server configs {name: {command, args, env}}
        openai_api_key: Optional OpenAI API key for LLMs
        log_handler: Optional handler for structured log events
        tool_primitive: Optional pre-configured ToolPrimitive (for testing with mocks)
        recursion_depth: Current nesting depth when this runtime is spawned by a
            parent procedure (0 for a top-level run)
        tool_paths: Optional list of paths to scan for local Python tool plugins
        external_config: Optional external config (from .tac.yml) to merge with DSL config
        run_id: Optional run identifier for tagging checkpoints
        source_file_path: Optional path to the .tac file being executed (for accurate source locations)
    """
    self.procedure_id = procedure_id
    self.storage_backend = storage_backend
    self.hitl_handler = hitl_handler
    self.chat_recorder = chat_recorder
    self.mcp_server = mcp_server  # Keep for backward compatibility
    self.mcp_servers = mcp_servers or {}
    self.mcp_manager = None  # Will be initialized in _setup_agents
    self.openai_api_key = openai_api_key
    self.log_handler = log_handler
    # Held aside; execute() prefers this over building a fresh ToolPrimitive
    # so mock-mode tests share one tool primitive across the run.
    self._injected_tool_primitive = tool_primitive
    self.tool_paths = tool_paths or []
    self.recursion_depth = recursion_depth
    self.external_config = external_config or {}
    self.run_id = run_id
    self.source_file_path = source_file_path

    # Will be initialized during setup (execute() steps 0-4)
    self.config: Optional[Dict[str, Any]] = None  # Legacy YAML support
    self.registry: Optional[ProcedureRegistry] = None  # New DSL registry
    self.lua_sandbox: Optional[LuaSandbox] = None
    self.output_validator: Optional[OutputValidator] = None
    self.template_resolver: Optional[TemplateResolver] = None
    self.message_history_manager: Optional[MessageHistoryManager] = None

    # Execution context (created early in execute() so DSL parsing can use it)
    self.execution_context: Optional[BaseExecutionContext] = None

    # Primitives (shared across all agents)
    self.state_primitive: Optional[StatePrimitive] = None
    self.iterations_primitive: Optional[IterationsPrimitive] = None
    self.stop_primitive: Optional[StopPrimitive] = None
    self.tool_primitive: Optional[ToolPrimitive] = None
    self.human_primitive: Optional[HumanPrimitive] = None
    self.step_primitive: Optional[StepPrimitive] = None
    self.checkpoint_primitive: Optional[CheckpointPrimitive] = None
    self.log_primitive: Optional[LogPrimitive] = None
    self.json_primitive: Optional[JsonPrimitive] = None
    self.retry_primitive: Optional[RetryPrimitive] = None
    self.file_primitive: Optional[FilePrimitive] = None
    self.procedure_primitive: Optional[ProcedurePrimitive] = None
    self.system_primitive: Optional[SystemPrimitive] = None
    self.host_primitive: Optional[HostPrimitive] = None

    # Agent primitives (one per agent)
    self.agents: Dict[str, Any] = {}

    # Model primitives (one per model)
    self.models: Dict[str, Any] = {}

    # Toolset registry (name -> AbstractToolset instance)
    self.toolset_registry: Dict[str, Any] = {}

    # User dependencies (HTTP clients, DB connections, etc.)
    self.user_dependencies: Dict[str, Any] = {}
    self.dependency_manager: Optional[Any] = None  # ResourceManager for cleanup

    # Mock manager for testing
    self.mock_manager: Optional[Any] = None  # MockManager instance
    # Per-scenario agent mocks injected externally (e.g. by BDD steps);
    # maps agent name -> list of temporal mock turns.
    self.external_agent_mocks: Optional[dict[str, list[dict[str, Any]]]] = None
    self.mock_all_agents: bool = False

    logger.info(f"TactusRuntime initialized for procedure {procedure_id}")
|
|
166
|
+
async def execute(
|
|
167
|
+
self, source: str, context: Optional[Dict[str, Any]] = None, format: str = "yaml"
|
|
168
|
+
) -> Dict[str, Any]:
|
|
169
|
+
"""
|
|
170
|
+
Execute a workflow (Lua DSL or legacy YAML format).
|
|
171
|
+
|
|
172
|
+
Args:
|
|
173
|
+
source: Lua DSL source code (.tac) or YAML config (legacy)
|
|
174
|
+
context: Optional context dict with pre-loaded data (can override params)
|
|
175
|
+
format: Source format - "lua" (default) or "yaml" (legacy)
|
|
176
|
+
|
|
177
|
+
Returns:
|
|
178
|
+
Execution results dict with:
|
|
179
|
+
- success: bool
|
|
180
|
+
- result: Any (return value from Lua workflow)
|
|
181
|
+
- state: Final state
|
|
182
|
+
- iterations: Number of iterations
|
|
183
|
+
- tools_used: List of tool names called
|
|
184
|
+
- error: Error message if failed
|
|
185
|
+
|
|
186
|
+
Raises:
|
|
187
|
+
TactusRuntimeError: If execution fails
|
|
188
|
+
"""
|
|
189
|
+
session_id = None
|
|
190
|
+
self.context = context or {} # Store context for param merging
|
|
191
|
+
|
|
192
|
+
try:
|
|
193
|
+
# 0. Setup Lua sandbox FIRST (needed for both YAML and Lua DSL)
|
|
194
|
+
logger.info("Step 0: Setting up Lua sandbox")
|
|
195
|
+
strict_determinism = self.external_config.get("strict_determinism", False)
|
|
196
|
+
|
|
197
|
+
# Compute base_path for sandbox from source file path if available
|
|
198
|
+
# This ensures require() works correctly even when running from different directories
|
|
199
|
+
sandbox_base_path = None
|
|
200
|
+
if self.source_file_path:
|
|
201
|
+
from pathlib import Path
|
|
202
|
+
|
|
203
|
+
sandbox_base_path = str(Path(self.source_file_path).parent.resolve())
|
|
204
|
+
logger.debug(
|
|
205
|
+
f"Using source file directory as sandbox base_path: {sandbox_base_path}"
|
|
206
|
+
)
|
|
207
|
+
|
|
208
|
+
self.lua_sandbox = LuaSandbox(
|
|
209
|
+
execution_context=None,
|
|
210
|
+
strict_determinism=strict_determinism,
|
|
211
|
+
base_path=sandbox_base_path,
|
|
212
|
+
)
|
|
213
|
+
|
|
214
|
+
# 0.5. Create execution context EARLY so it's available during DSL parsing
|
|
215
|
+
# This is critical for immediate agent creation during parsing
|
|
216
|
+
logger.info("Step 0.5: Creating execution context (early)")
|
|
217
|
+
self.execution_context = BaseExecutionContext(
|
|
218
|
+
procedure_id=self.procedure_id,
|
|
219
|
+
storage_backend=self.storage_backend,
|
|
220
|
+
hitl_handler=self.hitl_handler,
|
|
221
|
+
strict_determinism=strict_determinism,
|
|
222
|
+
log_handler=self.log_handler,
|
|
223
|
+
)
|
|
224
|
+
|
|
225
|
+
# Set run_id if provided
|
|
226
|
+
if self.run_id:
|
|
227
|
+
self.execution_context.set_run_id(self.run_id)
|
|
228
|
+
logger.debug(
|
|
229
|
+
"[CHECKPOINT] BaseExecutionContext created early for immediate agent creation"
|
|
230
|
+
)
|
|
231
|
+
|
|
232
|
+
# Attach execution context to sandbox for determinism checking (bidirectional)
|
|
233
|
+
self.lua_sandbox.set_execution_context(self.execution_context)
|
|
234
|
+
# Also store lua_sandbox reference on execution_context for debug.getinfo access
|
|
235
|
+
self.execution_context.set_lua_sandbox(self.lua_sandbox)
|
|
236
|
+
logger.debug("[CHECKPOINT] ExecutionContext and LuaSandbox connected bidirectionally")
|
|
237
|
+
|
|
238
|
+
# Set .tac file path NOW (before parsing) so source location is available during agent calls
|
|
239
|
+
if self.source_file_path:
|
|
240
|
+
self.execution_context.set_tac_file(self.source_file_path, source)
|
|
241
|
+
logger.info(f"[CHECKPOINT] Set .tac file path EARLY: {self.source_file_path}")
|
|
242
|
+
else:
|
|
243
|
+
logger.warning("[CHECKPOINT] .tac file path NOT set - source_file_path is None")
|
|
244
|
+
|
|
245
|
+
# 0b. For Lua DSL, inject placeholder primitives BEFORE parsing
|
|
246
|
+
# so they're available in the procedure function's closure
|
|
247
|
+
placeholder_tool = None # Will be set for Lua DSL
|
|
248
|
+
if format == "lua":
|
|
249
|
+
logger.debug("Pre-injecting placeholder primitives for Lua DSL parsing")
|
|
250
|
+
# Import here to avoid issues with YAML format
|
|
251
|
+
from tactus.primitives.log import LogPrimitive as LuaLogPrimitive
|
|
252
|
+
from tactus.primitives.state import StatePrimitive as LuaStatePrimitive
|
|
253
|
+
from tactus.primitives.tool import ToolPrimitive as LuaToolPrimitive
|
|
254
|
+
from tactus.primitives.system import SystemPrimitive as LuaSystemPrimitive
|
|
255
|
+
|
|
256
|
+
# Create minimal primitives that don't need full config
|
|
257
|
+
placeholder_log = LuaLogPrimitive(procedure_id=self.procedure_id)
|
|
258
|
+
placeholder_state = LuaStatePrimitive()
|
|
259
|
+
# Use injected tool primitive if provided (for mock mode)
|
|
260
|
+
# This ensures ToolHandles (like done) use the same primitive as MockAgentPrimitive
|
|
261
|
+
if self._injected_tool_primitive:
|
|
262
|
+
placeholder_tool = self._injected_tool_primitive
|
|
263
|
+
logger.debug("Using injected tool primitive for parsing (mock mode)")
|
|
264
|
+
else:
|
|
265
|
+
# Create tool primitive with log_handler so direct tool calls are tracked
|
|
266
|
+
placeholder_tool = LuaToolPrimitive(
|
|
267
|
+
log_handler=self.log_handler, procedure_id=self.procedure_id
|
|
268
|
+
)
|
|
269
|
+
placeholder_params = {} # Empty params dict
|
|
270
|
+
self.lua_sandbox.inject_primitive("Log", placeholder_log)
|
|
271
|
+
# Inject _state_primitive for metatable to use
|
|
272
|
+
self.lua_sandbox.inject_primitive("_state_primitive", placeholder_state)
|
|
273
|
+
|
|
274
|
+
# Create State object with special methods and lowercase state proxy with metatable
|
|
275
|
+
self.lua_sandbox.lua.execute(
|
|
276
|
+
"""
|
|
277
|
+
State = {
|
|
278
|
+
increment = function(key, amount)
|
|
279
|
+
return _state_primitive.increment(key, amount or 1)
|
|
280
|
+
end,
|
|
281
|
+
append = function(key, value)
|
|
282
|
+
return _state_primitive.append(key, value)
|
|
283
|
+
end,
|
|
284
|
+
all = function()
|
|
285
|
+
return _state_primitive.all()
|
|
286
|
+
end
|
|
287
|
+
}
|
|
288
|
+
|
|
289
|
+
-- Create lowercase 'state' proxy with metatable
|
|
290
|
+
state = setmetatable({}, {
|
|
291
|
+
__index = function(_, key)
|
|
292
|
+
return _state_primitive.get(key)
|
|
293
|
+
end,
|
|
294
|
+
__newindex = function(_, key, value)
|
|
295
|
+
_state_primitive.set(key, value)
|
|
296
|
+
end
|
|
297
|
+
})
|
|
298
|
+
"""
|
|
299
|
+
)
|
|
300
|
+
self.lua_sandbox.inject_primitive("Tool", placeholder_tool)
|
|
301
|
+
self.lua_sandbox.inject_primitive("params", placeholder_params)
|
|
302
|
+
placeholder_system = LuaSystemPrimitive(
|
|
303
|
+
procedure_id=self.procedure_id, log_handler=self.log_handler
|
|
304
|
+
)
|
|
305
|
+
self.lua_sandbox.inject_primitive("System", placeholder_system)
|
|
306
|
+
|
|
307
|
+
# 1. Parse configuration (Lua DSL or YAML)
|
|
308
|
+
if format == "lua":
|
|
309
|
+
logger.info("Step 1: Parsing Lua DSL configuration")
|
|
310
|
+
|
|
311
|
+
# Script mode: wrap top-level executable code in an implicit main Procedure
|
|
312
|
+
# so agents/tools aren't executed during parsing.
|
|
313
|
+
source = self._maybe_transform_script_mode_source(source)
|
|
314
|
+
|
|
315
|
+
# Pass placeholder_tool so tool() can return callable ToolHandles
|
|
316
|
+
self.registry = self._parse_declarations(source, placeholder_tool)
|
|
317
|
+
logger.info("Loaded procedure from Lua DSL")
|
|
318
|
+
# Convert registry to config dict for compatibility
|
|
319
|
+
self.config = self._registry_to_config(self.registry)
|
|
320
|
+
logger.debug(
|
|
321
|
+
f"Registry contents: agents={list(self.registry.agents.keys())}, lua_tools={list(self.registry.lua_tools.keys())}"
|
|
322
|
+
)
|
|
323
|
+
|
|
324
|
+
# Process mocks from registry if mock_manager exists
|
|
325
|
+
if self.mock_manager and self.registry.mocks:
|
|
326
|
+
logger.info(f"Registering {len(self.registry.mocks)} mocks from DSL")
|
|
327
|
+
for tool_name, mock_config in self.registry.mocks.items():
|
|
328
|
+
self.mock_manager.register_mock(tool_name, mock_config)
|
|
329
|
+
self.mock_manager.enable_mock(tool_name)
|
|
330
|
+
logger.debug(f"Registered and enabled mock for tool '{tool_name}'")
|
|
331
|
+
|
|
332
|
+
# Apply external, per-scenario agent mocks (from BDD steps).
|
|
333
|
+
# These should take precedence over any `Mocks { ... }` declared in the .tac file.
|
|
334
|
+
if self.external_agent_mocks and self.registry:
|
|
335
|
+
from tactus.core.registry import AgentMockConfig
|
|
336
|
+
|
|
337
|
+
for agent_name, temporal_turns in self.external_agent_mocks.items():
|
|
338
|
+
if not isinstance(temporal_turns, list):
|
|
339
|
+
raise TactusRuntimeError(
|
|
340
|
+
f"External agent mocks for '{agent_name}' must be a list of turns"
|
|
341
|
+
)
|
|
342
|
+
self.registry.agent_mocks[agent_name] = AgentMockConfig(
|
|
343
|
+
temporal=temporal_turns
|
|
344
|
+
)
|
|
345
|
+
|
|
346
|
+
# If we're in mocked mode, ensure agents are mocked deterministically even if
|
|
347
|
+
# the .tac file doesn't declare `Mocks { ... }` for them.
|
|
348
|
+
if self.mock_all_agents and self.registry:
|
|
349
|
+
from tactus.core.registry import AgentMockConfig
|
|
350
|
+
|
|
351
|
+
for agent_name in self.registry.agents.keys():
|
|
352
|
+
if agent_name not in self.registry.agent_mocks:
|
|
353
|
+
self.registry.agent_mocks[agent_name] = AgentMockConfig(
|
|
354
|
+
message=f"Mocked response from {agent_name}"
|
|
355
|
+
)
|
|
356
|
+
|
|
357
|
+
# Merge external config (from .tac.yml) into self.config
|
|
358
|
+
# External config provides toolsets, default_toolsets, etc.
|
|
359
|
+
if self.external_config:
|
|
360
|
+
# Merge toolsets from external config
|
|
361
|
+
if "toolsets" in self.external_config:
|
|
362
|
+
if "toolsets" not in self.config:
|
|
363
|
+
self.config["toolsets"] = {}
|
|
364
|
+
self.config["toolsets"].update(self.external_config["toolsets"])
|
|
365
|
+
|
|
366
|
+
# Merge other external config keys (like default_toolsets)
|
|
367
|
+
for key in ["default_toolsets", "default_model", "default_provider"]:
|
|
368
|
+
if key in self.external_config:
|
|
369
|
+
self.config[key] = self.external_config[key]
|
|
370
|
+
|
|
371
|
+
logger.debug(f"Merged external config with {len(self.external_config)} keys")
|
|
372
|
+
else:
|
|
373
|
+
# Legacy YAML support
|
|
374
|
+
logger.info("Step 1: Parsing YAML configuration (legacy)")
|
|
375
|
+
if ProcedureYAMLParser is None:
|
|
376
|
+
raise TactusRuntimeError("YAML support not available - use Lua DSL format")
|
|
377
|
+
self.config = ProcedureYAMLParser.parse(source)
|
|
378
|
+
logger.info(f"Loaded procedure: {self.config['name']} v{self.config['version']}")
|
|
379
|
+
|
|
380
|
+
# 2. Setup output validator
|
|
381
|
+
logger.info("Step 2: Setting up output validator")
|
|
382
|
+
output_schema = self.config.get("output", {})
|
|
383
|
+
self.output_validator = OutputValidator(output_schema)
|
|
384
|
+
if output_schema:
|
|
385
|
+
logger.info(
|
|
386
|
+
f"Output schema has {len(output_schema)} fields: {list(output_schema.keys())}"
|
|
387
|
+
)
|
|
388
|
+
|
|
389
|
+
# 3. Lua sandbox is already set up in step 0
|
|
390
|
+
# (keeping this comment for step numbering consistency)
|
|
391
|
+
|
|
392
|
+
# 4. Initialize primitives
|
|
393
|
+
logger.info("Step 4: Initializing primitives")
|
|
394
|
+
# Pass placeholder_tool so direct tool calls are tracked in the same primitive
|
|
395
|
+
await self._initialize_primitives(placeholder_tool=placeholder_tool)
|
|
396
|
+
|
|
397
|
+
# 4b. Initialize template resolver and session manager
|
|
398
|
+
self.template_resolver = TemplateResolver(
|
|
399
|
+
params=context or {},
|
|
400
|
+
state={}, # Will be updated dynamically
|
|
401
|
+
)
|
|
402
|
+
self.message_history_manager = MessageHistoryManager()
|
|
403
|
+
logger.debug("Template resolver and message history manager initialized")
|
|
404
|
+
|
|
405
|
+
# 5. Start chat session if recorder available
|
|
406
|
+
if self.chat_recorder:
|
|
407
|
+
logger.info("Step 5: Starting chat session")
|
|
408
|
+
session_id = await self.chat_recorder.start_session(context)
|
|
409
|
+
if session_id:
|
|
410
|
+
logger.info(f"Chat session started: {session_id}")
|
|
411
|
+
else:
|
|
412
|
+
logger.warning("Failed to create chat session - continuing without recording")
|
|
413
|
+
|
|
414
|
+
# 6. Execution context already created in Step 0.5
|
|
415
|
+
# (.tac file path and lua_sandbox reference already set in Step 0.5)
|
|
416
|
+
logger.info("Step 6: Execution context configuration (already done in Step 0.5)")
|
|
417
|
+
|
|
418
|
+
# 7. Initialize HITL and checkpoint primitives (require execution_context)
|
|
419
|
+
logger.info("Step 7: Initializing HITL and checkpoint primitives")
|
|
420
|
+
hitl_config = self.config.get("hitl", {})
|
|
421
|
+
self.human_primitive = HumanPrimitive(self.execution_context, hitl_config)
|
|
422
|
+
self.step_primitive = StepPrimitive(self.execution_context)
|
|
423
|
+
self.checkpoint_primitive = CheckpointPrimitive(self.execution_context)
|
|
424
|
+
self.log_primitive = LogPrimitive(
|
|
425
|
+
procedure_id=self.procedure_id, log_handler=self.log_handler
|
|
426
|
+
)
|
|
427
|
+
self.message_history_primitive = MessageHistoryPrimitive(
|
|
428
|
+
message_history_manager=self.message_history_manager
|
|
429
|
+
)
|
|
430
|
+
self.json_primitive = JsonPrimitive(lua_sandbox=self.lua_sandbox)
|
|
431
|
+
self.retry_primitive = RetryPrimitive()
|
|
432
|
+
self.file_primitive = FilePrimitive(execution_context=self.execution_context)
|
|
433
|
+
self.system_primitive = SystemPrimitive(
|
|
434
|
+
procedure_id=self.procedure_id, log_handler=self.log_handler
|
|
435
|
+
)
|
|
436
|
+
self.host_primitive = HostPrimitive()
|
|
437
|
+
|
|
438
|
+
# Initialize Procedure primitive (requires execution_context)
|
|
439
|
+
max_depth = self.config.get("max_depth", 5) if self.config else 5
|
|
440
|
+
self.procedure_primitive = ProcedurePrimitive(
|
|
441
|
+
execution_context=self.execution_context,
|
|
442
|
+
runtime_factory=self._create_runtime_for_procedure,
|
|
443
|
+
lua_sandbox=self.lua_sandbox,
|
|
444
|
+
max_depth=max_depth,
|
|
445
|
+
current_depth=self.recursion_depth,
|
|
446
|
+
)
|
|
447
|
+
logger.debug("HITL, checkpoint, message history, and procedure primitives initialized")
|
|
448
|
+
|
|
449
|
+
# 7.5. Initialize toolset registry
|
|
450
|
+
logger.info("Step 7.5: Initializing toolset registry")
|
|
451
|
+
await self._initialize_toolsets()
|
|
452
|
+
|
|
453
|
+
# 7.6. Initialize named procedure callables
|
|
454
|
+
logger.info("Step 7.6: Initializing named procedure callables")
|
|
455
|
+
await self._initialize_named_procedures()
|
|
456
|
+
|
|
457
|
+
# 8. Setup agents with LLMs and tools
|
|
458
|
+
logger.info("Step 8: Setting up agents")
|
|
459
|
+
# Set OpenAI API key in environment if provided (for OpenAI agents)
|
|
460
|
+
import os
|
|
461
|
+
|
|
462
|
+
if self.openai_api_key and "OPENAI_API_KEY" not in os.environ:
|
|
463
|
+
os.environ["OPENAI_API_KEY"] = self.openai_api_key
|
|
464
|
+
|
|
465
|
+
# Always set up agents - they may use providers other than OpenAI (e.g., Bedrock)
|
|
466
|
+
await self._setup_agents(context or {})
|
|
467
|
+
|
|
468
|
+
# Setup models for ML inference
|
|
469
|
+
await self._setup_models()
|
|
470
|
+
|
|
471
|
+
# 9. Inject primitives into Lua
|
|
472
|
+
logger.info("Step 9: Injecting primitives into Lua environment")
|
|
473
|
+
self._inject_primitives()
|
|
474
|
+
|
|
475
|
+
# 10. Execute workflow (may raise ProcedureWaitingForHuman)
|
|
476
|
+
logger.info("Step 10: Executing Lua workflow")
|
|
477
|
+
workflow_result = self._execute_workflow()
|
|
478
|
+
|
|
479
|
+
# 10.5. Apply return_prompt if specified (future: inject to agent for summary)
|
|
480
|
+
if self.config.get("return_prompt"):
|
|
481
|
+
return_prompt = self.config["return_prompt"]
|
|
482
|
+
logger.info(f"Return prompt specified: {return_prompt[:50]}...")
|
|
483
|
+
# TODO: In full implementation, inject this prompt to an agent to get a summary
|
|
484
|
+
# For now, just log it
|
|
485
|
+
|
|
486
|
+
# 11. Validate workflow output
|
|
487
|
+
logger.info("Step 11: Validating workflow output")
|
|
488
|
+
try:
|
|
489
|
+
validated_result = self.output_validator.validate(workflow_result)
|
|
490
|
+
logger.info("✓ Output validation passed")
|
|
491
|
+
except OutputValidationError as e:
|
|
492
|
+
logger.error(f"Output validation failed: {e}")
|
|
493
|
+
# Still continue but mark as validation failure
|
|
494
|
+
validated_result = workflow_result
|
|
495
|
+
|
|
496
|
+
# 12. Flush all queued chat recordings
|
|
497
|
+
if self.chat_recorder:
|
|
498
|
+
logger.info("Step 12: Flushing chat recordings")
|
|
499
|
+
# Flush agent messages if agents have flush capability
|
|
500
|
+
for agent_name, agent_primitive in self.agents.items():
|
|
501
|
+
if hasattr(agent_primitive, "flush_recordings"):
|
|
502
|
+
await agent_primitive.flush_recordings()
|
|
503
|
+
|
|
504
|
+
# 13. End chat session
|
|
505
|
+
if self.chat_recorder and session_id:
|
|
506
|
+
await self.chat_recorder.end_session(session_id, status="COMPLETED")
|
|
507
|
+
|
|
508
|
+
# 14. Build final results
|
|
509
|
+
final_state = self.state_primitive.all() if self.state_primitive else {}
|
|
510
|
+
tools_used = (
|
|
511
|
+
[call.name for call in self.tool_primitive.get_all_calls()]
|
|
512
|
+
if self.tool_primitive
|
|
513
|
+
else []
|
|
514
|
+
)
|
|
515
|
+
|
|
516
|
+
logger.info(
|
|
517
|
+
f"Workflow execution complete: "
|
|
518
|
+
f"{self.iterations_primitive.current() if self.iterations_primitive else 0} iterations, "
|
|
519
|
+
f"{len(tools_used)} tool calls"
|
|
520
|
+
)
|
|
521
|
+
|
|
522
|
+
# Collect cost events and calculate totals
|
|
523
|
+
cost_breakdown = []
|
|
524
|
+
total_cost = 0.0
|
|
525
|
+
total_tokens = 0
|
|
526
|
+
|
|
527
|
+
if self.log_handler and hasattr(self.log_handler, "cost_events"):
|
|
528
|
+
# Get cost events from log handler
|
|
529
|
+
cost_breakdown = self.log_handler.cost_events
|
|
530
|
+
for event in cost_breakdown:
|
|
531
|
+
total_cost += event.total_cost
|
|
532
|
+
total_tokens += event.total_tokens
|
|
533
|
+
|
|
534
|
+
# Send execution summary event if log handler is available
|
|
535
|
+
if self.log_handler:
|
|
536
|
+
from tactus.protocols.models import ExecutionSummaryEvent
|
|
537
|
+
|
|
538
|
+
# Compute checkpoint metrics from execution log
|
|
539
|
+
checkpoint_count = 0
|
|
540
|
+
checkpoint_types = {}
|
|
541
|
+
checkpoint_duration_ms = 0.0
|
|
542
|
+
|
|
543
|
+
if self.execution_context and hasattr(self.execution_context, "metadata"):
|
|
544
|
+
checkpoints = self.execution_context.metadata.execution_log
|
|
545
|
+
checkpoint_count = len(checkpoints)
|
|
546
|
+
|
|
547
|
+
for checkpoint in checkpoints:
|
|
548
|
+
# Count by type
|
|
549
|
+
checkpoint_type = checkpoint.type
|
|
550
|
+
checkpoint_types[checkpoint_type] = (
|
|
551
|
+
checkpoint_types.get(checkpoint_type, 0) + 1
|
|
552
|
+
)
|
|
553
|
+
|
|
554
|
+
# Sum durations
|
|
555
|
+
if checkpoint.duration_ms:
|
|
556
|
+
checkpoint_duration_ms += checkpoint.duration_ms
|
|
557
|
+
|
|
558
|
+
summary_event = ExecutionSummaryEvent(
|
|
559
|
+
result=validated_result,
|
|
560
|
+
final_state=final_state,
|
|
561
|
+
iterations=(
|
|
562
|
+
self.iterations_primitive.current() if self.iterations_primitive else 0
|
|
563
|
+
),
|
|
564
|
+
tools_used=tools_used,
|
|
565
|
+
procedure_id=self.procedure_id,
|
|
566
|
+
total_cost=total_cost,
|
|
567
|
+
total_tokens=total_tokens,
|
|
568
|
+
cost_breakdown=cost_breakdown,
|
|
569
|
+
checkpoint_count=checkpoint_count,
|
|
570
|
+
checkpoint_types=checkpoint_types,
|
|
571
|
+
checkpoint_duration_ms=(
|
|
572
|
+
checkpoint_duration_ms if checkpoint_duration_ms > 0 else None
|
|
573
|
+
),
|
|
574
|
+
exit_code=0, # Success
|
|
575
|
+
)
|
|
576
|
+
self.log_handler.log(summary_event)
|
|
577
|
+
|
|
578
|
+
return {
|
|
579
|
+
"success": True,
|
|
580
|
+
"procedure_id": self.procedure_id,
|
|
581
|
+
"result": validated_result,
|
|
582
|
+
"state": final_state,
|
|
583
|
+
"iterations": (
|
|
584
|
+
self.iterations_primitive.current() if self.iterations_primitive else 0
|
|
585
|
+
),
|
|
586
|
+
"tools_used": tools_used,
|
|
587
|
+
"stop_requested": self.stop_primitive.requested() if self.stop_primitive else False,
|
|
588
|
+
"stop_reason": self.stop_primitive.reason() if self.stop_primitive else None,
|
|
589
|
+
"session_id": session_id,
|
|
590
|
+
"total_cost": total_cost,
|
|
591
|
+
"total_tokens": total_tokens,
|
|
592
|
+
"cost_breakdown": cost_breakdown,
|
|
593
|
+
}
|
|
594
|
+
|
|
595
|
+
except ProcedureWaitingForHuman as e:
|
|
596
|
+
logger.info(f"Procedure waiting for human: {e}")
|
|
597
|
+
|
|
598
|
+
# Flush recordings before exiting
|
|
599
|
+
if self.chat_recorder:
|
|
600
|
+
for agent_primitive in self.agents.values():
|
|
601
|
+
if hasattr(agent_primitive, "flush_recordings"):
|
|
602
|
+
await agent_primitive.flush_recordings()
|
|
603
|
+
|
|
604
|
+
# Note: Procedure status updated by execution context
|
|
605
|
+
# Chat session stays active for resume
|
|
606
|
+
|
|
607
|
+
return {
|
|
608
|
+
"success": False,
|
|
609
|
+
"status": "WAITING_FOR_HUMAN",
|
|
610
|
+
"procedure_id": self.procedure_id,
|
|
611
|
+
"pending_message_id": getattr(e, "pending_message_id", None),
|
|
612
|
+
"message": str(e),
|
|
613
|
+
"session_id": session_id,
|
|
614
|
+
}
|
|
615
|
+
|
|
616
|
+
except ProcedureConfigError as e:
|
|
617
|
+
logger.error(f"Configuration error: {e}")
|
|
618
|
+
# Flush recordings even on error
|
|
619
|
+
if self.chat_recorder and session_id:
|
|
620
|
+
try:
|
|
621
|
+
await self.chat_recorder.end_session(session_id, status="FAILED")
|
|
622
|
+
except Exception as err:
|
|
623
|
+
logger.warning(f"Failed to end chat session: {err}")
|
|
624
|
+
|
|
625
|
+
# Send error summary event if log handler is available
|
|
626
|
+
if self.log_handler:
|
|
627
|
+
import traceback
|
|
628
|
+
from tactus.protocols.models import ExecutionSummaryEvent
|
|
629
|
+
|
|
630
|
+
summary_event = ExecutionSummaryEvent(
|
|
631
|
+
result=None,
|
|
632
|
+
final_state={},
|
|
633
|
+
iterations=0,
|
|
634
|
+
tools_used=[],
|
|
635
|
+
procedure_id=self.procedure_id,
|
|
636
|
+
total_cost=0.0,
|
|
637
|
+
total_tokens=0,
|
|
638
|
+
cost_breakdown=[],
|
|
639
|
+
exit_code=1,
|
|
640
|
+
error_message=str(e),
|
|
641
|
+
error_type=type(e).__name__,
|
|
642
|
+
traceback=traceback.format_exc(),
|
|
643
|
+
)
|
|
644
|
+
self.log_handler.log(summary_event)
|
|
645
|
+
|
|
646
|
+
return {
|
|
647
|
+
"success": False,
|
|
648
|
+
"procedure_id": self.procedure_id,
|
|
649
|
+
"error": f"Configuration error: {e}",
|
|
650
|
+
}
|
|
651
|
+
|
|
652
|
+
except LuaSandboxError as e:
|
|
653
|
+
logger.error(f"Lua execution error: {e}")
|
|
654
|
+
|
|
655
|
+
# Apply error_prompt if specified (future: inject to agent for explanation)
|
|
656
|
+
if self.config and self.config.get("error_prompt"):
|
|
657
|
+
error_prompt = self.config["error_prompt"]
|
|
658
|
+
logger.info(f"Error prompt specified: {error_prompt[:50]}...")
|
|
659
|
+
# TODO: In full implementation, inject this prompt to an agent to get an explanation
|
|
660
|
+
|
|
661
|
+
# Flush recordings even on error
|
|
662
|
+
if self.chat_recorder and session_id:
|
|
663
|
+
try:
|
|
664
|
+
await self.chat_recorder.end_session(session_id, status="FAILED")
|
|
665
|
+
except Exception as err:
|
|
666
|
+
logger.warning(f"Failed to end chat session: {err}")
|
|
667
|
+
|
|
668
|
+
# Send error summary event if log handler is available
|
|
669
|
+
if self.log_handler:
|
|
670
|
+
import traceback
|
|
671
|
+
from tactus.protocols.models import ExecutionSummaryEvent
|
|
672
|
+
|
|
673
|
+
summary_event = ExecutionSummaryEvent(
|
|
674
|
+
result=None,
|
|
675
|
+
final_state={},
|
|
676
|
+
iterations=0,
|
|
677
|
+
tools_used=[],
|
|
678
|
+
procedure_id=self.procedure_id,
|
|
679
|
+
total_cost=0.0,
|
|
680
|
+
total_tokens=0,
|
|
681
|
+
cost_breakdown=[],
|
|
682
|
+
exit_code=1,
|
|
683
|
+
error_message=str(e),
|
|
684
|
+
error_type=type(e).__name__,
|
|
685
|
+
traceback=traceback.format_exc(),
|
|
686
|
+
)
|
|
687
|
+
self.log_handler.log(summary_event)
|
|
688
|
+
|
|
689
|
+
return {
|
|
690
|
+
"success": False,
|
|
691
|
+
"procedure_id": self.procedure_id,
|
|
692
|
+
"error": f"Lua execution error: {e}",
|
|
693
|
+
}
|
|
694
|
+
|
|
695
|
+
except Exception as e:
|
|
696
|
+
logger.error(f"Unexpected error: {e}", exc_info=True)
|
|
697
|
+
|
|
698
|
+
# Apply error_prompt if specified (future: inject to agent for explanation)
|
|
699
|
+
if self.config and self.config.get("error_prompt"):
|
|
700
|
+
error_prompt = self.config["error_prompt"]
|
|
701
|
+
logger.info(f"Error prompt specified: {error_prompt[:50]}...")
|
|
702
|
+
# TODO: In full implementation, inject this prompt to an agent to get an explanation
|
|
703
|
+
|
|
704
|
+
# Flush recordings even on error
|
|
705
|
+
if self.chat_recorder and session_id:
|
|
706
|
+
try:
|
|
707
|
+
await self.chat_recorder.end_session(session_id, status="FAILED")
|
|
708
|
+
except Exception as err:
|
|
709
|
+
logger.warning(f"Failed to end chat session: {err}")
|
|
710
|
+
|
|
711
|
+
# Send error summary event if log handler is available
|
|
712
|
+
if self.log_handler:
|
|
713
|
+
import traceback
|
|
714
|
+
from tactus.protocols.models import ExecutionSummaryEvent
|
|
715
|
+
|
|
716
|
+
summary_event = ExecutionSummaryEvent(
|
|
717
|
+
result=None,
|
|
718
|
+
final_state={},
|
|
719
|
+
iterations=0,
|
|
720
|
+
tools_used=[],
|
|
721
|
+
procedure_id=self.procedure_id,
|
|
722
|
+
total_cost=0.0,
|
|
723
|
+
total_tokens=0,
|
|
724
|
+
cost_breakdown=[],
|
|
725
|
+
exit_code=1,
|
|
726
|
+
error_message=str(e),
|
|
727
|
+
error_type=type(e).__name__,
|
|
728
|
+
traceback=traceback.format_exc(),
|
|
729
|
+
)
|
|
730
|
+
self.log_handler.log(summary_event)
|
|
731
|
+
|
|
732
|
+
return {
|
|
733
|
+
"success": False,
|
|
734
|
+
"procedure_id": self.procedure_id,
|
|
735
|
+
"error": f"Unexpected error: {e}",
|
|
736
|
+
}
|
|
737
|
+
|
|
738
|
+
finally:
|
|
739
|
+
# Cleanup: Disconnect from MCP servers
|
|
740
|
+
if self.mcp_manager:
|
|
741
|
+
try:
|
|
742
|
+
await self.mcp_manager.__aexit__(None, None, None)
|
|
743
|
+
logger.info("Disconnected from MCP servers")
|
|
744
|
+
except Exception as e:
|
|
745
|
+
logger.warning(f"Error disconnecting from MCP servers: {e}")
|
|
746
|
+
|
|
747
|
+
# Cleanup: Close user dependencies
|
|
748
|
+
if self.dependency_manager:
|
|
749
|
+
try:
|
|
750
|
+
await self.dependency_manager.cleanup()
|
|
751
|
+
logger.info("Cleaned up user dependencies")
|
|
752
|
+
except Exception as e:
|
|
753
|
+
logger.warning(f"Error cleaning up dependencies: {e}")
|
|
754
|
+
|
|
755
|
+
async def _initialize_primitives(self, placeholder_tool: Optional[ToolPrimitive] = None):
    """Create the core primitive objects used during workflow execution.

    Args:
        placeholder_tool: ToolPrimitive optionally produced while parsing the
            DSL. When given, it is reused as-is so that direct tool calls made
            through ToolHandles stay tracked on the same primitive instance.
    """
    # The state schema comes from the registry when one is attached.
    schema = self.registry.state_schema if self.registry else {}

    self.state_primitive = StatePrimitive(state_schema=schema)
    self.iterations_primitive = IterationsPrimitive()
    self.stop_primitive = StopPrimitive()

    # Choose the tool primitive by priority:
    # injected (test/mock) > parser placeholder > freshly constructed.
    injected = self._injected_tool_primitive
    if injected:
        self.tool_primitive = injected
        logger.info("Using injected tool primitive (mock mode)")
    elif placeholder_tool:
        self.tool_primitive = placeholder_tool
        logger.debug("Reusing placeholder tool primitive for direct tool call tracking")
    else:
        self.tool_primitive = ToolPrimitive(
            log_handler=self.log_handler, procedure_id=self.procedure_id
        )

    # Tool.get() resolution requires a back-reference to this runtime.
    self.tool_primitive.set_runtime(self)

    # The toolset primitive also resolves names through the runtime.
    from tactus.primitives.toolset import ToolsetPrimitive

    self.toolset_primitive = ToolsetPrimitive(runtime=self)

    logger.debug("All primitives initialized")
|
|
791
|
+
|
|
792
|
+
def resolve_toolset(self, name: str) -> Optional[Any]:
    """
    Look up a toolset registered on this runtime by name.

    Called by ToolsetPrimitive.get() and by agent setup when resolving
    toolset references.

    Args:
        name: Toolset name to resolve

    Returns:
        AbstractToolset instance or None if not found
    """
    found = self.toolset_registry.get(name)
    if not found:
        # Log what IS available to make misconfigured names easy to spot.
        logger.warning(
            f"Toolset '{name}' not found in registry. Available: {list(self.toolset_registry.keys())}"
        )
        return None
    logger.debug(f"Resolved toolset '{name}' from registry")
    return found
|
|
813
|
+
|
|
814
|
+
async def _initialize_toolsets(self):
    """
    Load and register all toolsets from config and DSL-defined sources.

    Registration order matters: individual Lua tool() declarations are
    registered before DSL-defined toolsets so that toolsets can reference
    them by name. Stages performed here:
    1. Loads config-defined toolsets from YAML
    2. Registers MCP toolsets by server name
    3. Registers plugin toolset if tool_paths configured
    4. Registers individual Lua tool() declarations (inline or external source)
    5. Registers DSL-defined toolsets (from toolset() declarations)

    Each stage is individually guarded: a failure in one toolset is logged
    and skipped rather than aborting the whole initialization.

    Note: There are no built-in toolsets. Programmers must define their own
    tools using tool() declarations in their .tac files.

    NOTE(review): the inline step comments below are numbered 1, 3, 4, 5, 6
    (no 2) — likely a leftover from a removed stage; renumber when convenient.
    """
    logger.info(
        f"Starting _initialize_toolsets, registry has {len(self.registry.lua_tools) if self.registry else 0} lua_tools"
    )

    # 1. Load config-defined toolsets (from the YAML "toolsets" mapping).
    config_toolsets = self.config.get("toolsets", {})
    for name, definition in config_toolsets.items():
        try:
            toolset = await self._create_toolset_from_config(name, definition)
            if toolset:
                self.toolset_registry[name] = toolset
                logger.info(f"Registered config-defined toolset '{name}'")
        except Exception as e:
            logger.error(f"Failed to create toolset '{name}' from config: {e}", exc_info=True)

    # 3. Register MCP toolsets by server name.
    # The manager is entered here and exited during runtime cleanup.
    if self.mcp_servers:
        try:
            from tactus.adapters.mcp_manager import MCPServerManager

            self.mcp_manager = MCPServerManager(
                self.mcp_servers, tool_primitive=self.tool_primitive
            )
            await self.mcp_manager.__aenter__()

            # Register each MCP toolset by server name
            for server_name in self.mcp_servers.keys():
                # Get the toolset for this specific server
                toolset = self.mcp_manager.get_toolset_by_name(server_name)
                if toolset:
                    self.toolset_registry[server_name] = toolset
                    logger.info(f"Registered MCP toolset '{server_name}'")

            # Get all toolsets for logging
            mcp_toolsets = self.mcp_manager.get_toolsets()
            logger.info(f"Connected to {len(mcp_toolsets)} MCP server(s)")
        except Exception as e:
            # Check if this is a fileno error (common in test environments with redirected stderr)
            error_str = str(e)
            if "fileno" in error_str or isinstance(e, io.UnsupportedOperation):
                logger.warning(
                    "MCP server initialization skipped (test environment with redirected streams)"
                )
            else:
                logger.error(f"Failed to initialize MCP toolsets: {e}", exc_info=True)

    # 4. Register plugin toolset if tool_paths configured.
    # All plugin tools land under the single registry key "plugin".
    if self.tool_paths:
        try:
            from tactus.adapters.plugins import PluginLoader

            plugin_loader = PluginLoader(tool_primitive=self.tool_primitive)
            plugin_toolset = plugin_loader.create_toolset(self.tool_paths, name="plugin")
            self.toolset_registry["plugin"] = plugin_toolset
            logger.info(f"Registered plugin toolset from {len(self.tool_paths)} path(s)")
        except ImportError as e:
            logger.warning(
                f"Could not import PluginLoader: {e} - local tools will not be available"
            )
        except Exception as e:
            logger.error(f"Failed to create plugin toolset: {e}", exc_info=True)

    # 5. Register individual Lua tool() declarations BEFORE toolsets that reference them
    logger.info(
        f"Checking for Lua tools: has registry={hasattr(self, 'registry')}, registry not None={self.registry is not None if hasattr(self, 'registry') else False}"
    )
    if hasattr(self, "registry") and self.registry and hasattr(self.registry, "lua_tools"):
        logger.info(f"Found {len(self.registry.lua_tools)} Lua tools to register")
        try:
            from tactus.adapters.lua_tools import LuaToolsAdapter

            lua_adapter = LuaToolsAdapter(
                tool_primitive=self.tool_primitive, mock_manager=self.mock_manager
            )

            for tool_name, tool_spec in self.registry.lua_tools.items():
                try:
                    # Check if this tool references an external source
                    # (e.g. "./file.tac", "mcp.server", "plugin.mod.fn").
                    source = tool_spec.get("source")
                    logger.info(
                        f"Processing Lua tool '{tool_name}': source={source}, spec keys={list(tool_spec.keys())}"
                    )
                    if source:
                        # Resolve the external tool
                        resolved_tool = await self._resolve_tool_source(tool_name, source)
                        if resolved_tool:
                            self.toolset_registry[tool_name] = resolved_tool
                            logger.info(f"Registered tool '{tool_name}' from source '{source}'")
                            # Debug: print the actual tool
                            logger.debug(f"Tool object: {resolved_tool}")
                        else:
                            logger.error(
                                f"Failed to resolve tool '{tool_name}' from source '{source}'"
                            )
                    else:
                        # Regular inline Lua tool
                        toolset = lua_adapter.create_single_tool_toolset(tool_name, tool_spec)
                        self.toolset_registry[tool_name] = toolset
                        logger.info(f"Registered Lua tool '{tool_name}' as toolset")
                except Exception as e:
                    logger.error(f"Failed to create Lua tool '{tool_name}': {e}", exc_info=True)
        except ImportError as e:
            logger.warning(
                f"Could not import LuaToolsAdapter: {e} - Lua tools will not be available"
            )

    # 6. Register DSL-defined toolsets from registry (after individual tools are registered)
    if hasattr(self, "registry") and self.registry and hasattr(self.registry, "toolsets"):
        for name, definition in self.registry.toolsets.items():
            try:
                toolset = await self._create_toolset_from_config(name, definition)
                if toolset:
                    self.toolset_registry[name] = toolset
                    logger.info(f"Registered DSL-defined toolset '{name}'")
            except Exception as e:
                logger.error(f"Failed to create DSL toolset '{name}': {e}", exc_info=True)

    logger.info(
        f"Toolset registry initialized with {len(self.toolset_registry)} toolset(s): {list(self.toolset_registry.keys())}"
    )

    # Debug: Print what's in the toolset registry
    for name, toolset in self.toolset_registry.items():
        logger.debug(f" - {name}: {type(toolset)} -> {toolset}")
|
|
951
|
+
|
|
952
|
+
async def _resolve_tool_source(self, tool_name: str, source: str) -> Optional[Any]:
    """
    Resolve a tool from an external source.

    Dispatches on the source prefix:
      - "./" or "/"  -> load from a local .tac file (currently incomplete)
      - "mcp.<name>" -> reuse an already-registered MCP server toolset
      - "plugin.<module>.<func>" -> import a Python plugin function
      - "cli.<command>" -> wrap a shell command as a tool
      - "broker.<tool>" -> forward calls to the broker host primitive

    Args:
        tool_name: Name of the tool
        source: Source identifier (e.g., "./file.tac", "mcp.server")

    Returns:
        Toolset containing the resolved tool, or None if not found

    Note:
        Standard library tools (tactus.tools.*) should be loaded via require()
        in the .tac file, not via this method.
    """
    # Handle local .tac file imports (./path/file.tac)
    if source.startswith("./") or source.startswith("/"):
        from pathlib import Path

        # Resolve path relative to the source file if available
        if self.source_file_path and source.startswith("./"):
            base_dir = Path(self.source_file_path).parent
            file_path = base_dir / source[2:]  # Remove "./" prefix
        else:
            file_path = Path(source)

        # Check if file exists
        if not file_path.exists():
            logger.error(f"Tool source file not found: {file_path}")
            return None

        if not file_path.suffix == ".tac":
            logger.error(f"Tool source must be a .tac file: {file_path}")
            return None

        try:
            # Read and parse the .tac file
            with open(file_path, "r") as f:
                content = f.read()

            # Create a sub-runtime to load the tools from the file
            # We'll parse the file and extract tool definitions
            # Parse the file content using the DSL
            # NOTE(review): uses self.sandbox.runtime here, but other code in
            # this class uses self.lua_sandbox — confirm which attribute is
            # the canonical sandbox handle.
            lua_runtime = self.sandbox.runtime
            lua_runtime.execute(content)

            # Get registered tools from the builder
            # Note: This assumes tools are registered globally during parsing
            # We may need to enhance this to properly isolate tool loading
            logger.info(f"Loaded tools from {file_path}")

            # For now, return None as we need to implement proper tool extraction
            # This will be enhanced in the next iteration
            logger.warning("Tool extraction from .tac files needs enhancement")
            return None

        except Exception as e:
            logger.error(f"Failed to load tools from {file_path}: {e}", exc_info=True)
            return None

    # Handle MCP server tools (mcp.*)
    elif source.startswith("mcp."):
        server_name = source[4:]  # Remove "mcp." prefix
        # Look for the MCP server toolset
        # (registered earlier by _initialize_toolsets when MCP servers connect)
        if server_name in self.toolset_registry:
            return self.toolset_registry[server_name]
        else:
            logger.error(f"MCP server '{server_name}' not found in registry")
            return None

    # Handle plugin tools (plugin.*)
    elif source.startswith("plugin."):
        plugin_path = source[7:]  # Remove "plugin." prefix
        try:
            # Split the plugin path into module and function
            parts = plugin_path.rsplit(".", 1)
            if len(parts) != 2:
                logger.error(
                    f"Invalid plugin path format: {source} (expected plugin.module.function)"
                )
                return None

            module_name, func_name = parts

            # Try to import the module
            import importlib

            try:
                module = importlib.import_module(module_name)
            except ModuleNotFoundError:
                # Try with "tactus.plugins." prefix
                try:
                    module = importlib.import_module(f"tactus.plugins.{module_name}")
                except ModuleNotFoundError:
                    logger.error(f"Plugin module not found: {module_name}")
                    return None

            # Get the function from the module
            if not hasattr(module, func_name):
                logger.error(f"Function '{func_name}' not found in module '{module_name}'")
                return None

            tool_func = getattr(module, func_name)

            # Create a toolset with the plugin tool
            from pydantic_ai.toolsets import FunctionToolset
            from pydantic_ai import Tool

            # Create tracking wrapper.
            # Bound to a local so the closure below doesn't depend on self
            # re-resolution at call time.
            tool_primitive = self.tool_primitive

            def tracked_plugin_tool(**kwargs):
                """Wrapper that tracks plugin tool calls."""
                logger.debug(f"Plugin tool '{tool_name}' called with: {kwargs}")

                # Check for mock response first
                if self.mock_manager:
                    mock_result = self.mock_manager.get_mock_response(tool_name, kwargs)
                    if mock_result is not None:
                        logger.debug(f"Using mock response for '{tool_name}': {mock_result}")
                        if tool_primitive:
                            tool_primitive.record_call(tool_name, kwargs, mock_result)
                        if self.mock_manager:
                            self.mock_manager.record_call(tool_name, kwargs, mock_result)
                        return mock_result

                # Call the plugin function
                result = tool_func(**kwargs)
                logger.debug(f"Plugin tool '{tool_name}' returned: {result}")

                # Track the call
                if tool_primitive:
                    tool_primitive.record_call(tool_name, kwargs, result)
                if self.mock_manager:
                    self.mock_manager.record_call(tool_name, kwargs, result)

                return result

            # Copy metadata
            tracked_plugin_tool.__name__ = tool_name
            # NOTE(review): the getattr default never fires — functions always
            # have a __doc__ attribute (possibly None); "or" semantics were
            # likely intended here.
            tracked_plugin_tool.__doc__ = getattr(
                tool_func, "__doc__", f"Plugin tool: {tool_name}"
            )

            # Create and return toolset
            wrapped_tool = Tool(tracked_plugin_tool, name=tool_name)
            toolset = FunctionToolset(tools=[wrapped_tool])
            logger.info(f"Loaded plugin tool '{tool_name}' from {module_name}.{func_name}")
            return toolset

        except Exception as e:
            logger.error(f"Failed to load plugin tool '{source}': {e}", exc_info=True)
            return None

    # Handle CLI tools (cli.*)
    elif source.startswith("cli."):
        cli_command = source[4:]  # Remove "cli." prefix
        try:
            import subprocess
            import json
            from pydantic_ai.toolsets import FunctionToolset
            from pydantic_ai import Tool

            # Create tracking wrapper
            tool_primitive = self.tool_primitive

            def cli_tool_wrapper(**kwargs):
                """Wrapper that executes CLI commands."""
                logger.debug(f"CLI tool '{tool_name}' called with: {kwargs}")

                # Check for mock response first
                if self.mock_manager:
                    mock_result = self.mock_manager.get_mock_response(tool_name, kwargs)
                    if mock_result is not None:
                        logger.debug(f"Using mock response for '{tool_name}': {mock_result}")
                        if tool_primitive:
                            tool_primitive.record_call(tool_name, kwargs, mock_result)
                        if self.mock_manager:
                            self.mock_manager.record_call(tool_name, kwargs, mock_result)
                        return mock_result

                # Build command line
                cmd = [cli_command]

                # Add arguments from kwargs
                # Common patterns:
                # - Boolean flags: {"verbose": True} -> ["--verbose"]
                # - String args: {"file": "test.txt"} -> ["--file", "test.txt"]
                # - Positional: {"args": ["arg1", "arg2"]} -> ["arg1", "arg2"]

                for key, value in kwargs.items():
                    if key == "args" and isinstance(value, list):
                        # Positional arguments
                        cmd.extend(value)
                    elif isinstance(value, bool):
                        if value:
                            # Boolean flag
                            flag = f"--{key.replace('_', '-')}"
                            cmd.append(flag)
                    elif value is not None:
                        # Key-value argument
                        flag = f"--{key.replace('_', '-')}"
                        cmd.extend([flag, str(value)])

                logger.debug(f"Executing CLI command: {' '.join(cmd)}")

                try:
                    # Execute the command (argument list, not shell string —
                    # avoids shell injection on the kwargs values)
                    result = subprocess.run(
                        cmd,
                        capture_output=True,
                        text=True,
                        check=False,
                        timeout=30,  # 30 second timeout
                    )

                    # Prepare response
                    response = {
                        "stdout": result.stdout,
                        "stderr": result.stderr,
                        "returncode": result.returncode,
                        "success": result.returncode == 0,
                    }

                    # Try to parse JSON output if possible
                    if result.stdout.strip().startswith(
                        "{"
                    ) or result.stdout.strip().startswith("["):
                        try:
                            response["json"] = json.loads(result.stdout)
                        except json.JSONDecodeError:
                            pass

                    logger.debug(f"CLI tool '{tool_name}' returned: {response}")

                    # Track the call
                    if tool_primitive:
                        tool_primitive.record_call(tool_name, kwargs, response)
                    if self.mock_manager:
                        self.mock_manager.record_call(tool_name, kwargs, response)

                    return response

                except subprocess.TimeoutExpired:
                    error_response = {
                        "error": "Command timed out after 30 seconds",
                        "success": False,
                    }
                    if tool_primitive:
                        tool_primitive.record_call(tool_name, kwargs, error_response)
                    return error_response

                except Exception as e:
                    error_response = {"error": str(e), "success": False}
                    if tool_primitive:
                        tool_primitive.record_call(tool_name, kwargs, error_response)
                    return error_response

            # Set metadata
            cli_tool_wrapper.__name__ = tool_name
            cli_tool_wrapper.__doc__ = f"CLI tool wrapper for: {cli_command}"

            # Create and return toolset
            wrapped_tool = Tool(cli_tool_wrapper, name=tool_name)
            toolset = FunctionToolset(tools=[wrapped_tool])
            logger.info(f"Created CLI tool wrapper for '{cli_command}'")
            return toolset

        except Exception as e:
            logger.error(f"Failed to create CLI tool wrapper '{source}': {e}", exc_info=True)
            return None

    # Handle broker host tools (broker.*)
    elif source.startswith("broker."):
        broker_tool = source[7:]  # Remove "broker." prefix
        if not broker_tool:
            logger.error(
                f"Invalid broker tool source for '{tool_name}': {source} (expected broker.<tool>)"
            )
            return None

        try:
            from pydantic_ai import Tool
            from pydantic_ai.toolsets import FunctionToolset

            # Bind to locals so the closure is independent of later changes
            # to the runtime's attributes.
            tool_primitive = self.tool_primitive
            host_primitive = self.host_primitive
            mock_manager = self.mock_manager

            def broker_tool_wrapper(**kwargs: Any):
                # Mock short-circuit: recorded on both primitives, host never called.
                if mock_manager:
                    mock_result = mock_manager.get_mock_response(tool_name, kwargs)
                    if mock_result is not None:
                        if tool_primitive:
                            tool_primitive.record_call(tool_name, kwargs, mock_result)
                        mock_manager.record_call(tool_name, kwargs, mock_result)
                        return mock_result

                # Delegate the actual call to the broker host primitive.
                result = host_primitive.call(broker_tool, kwargs)

                if tool_primitive:
                    tool_primitive.record_call(tool_name, kwargs, result)
                if mock_manager:
                    mock_manager.record_call(tool_name, kwargs, result)

                return result

            broker_tool_wrapper.__name__ = tool_name
            broker_tool_wrapper.__doc__ = f"Brokered host tool: {broker_tool}"

            wrapped_tool = Tool(broker_tool_wrapper, name=tool_name)
            return FunctionToolset(tools=[wrapped_tool])

        except Exception as e:
            logger.error(
                f"Failed to create broker tool '{tool_name}' from source '{source}': {e}",
                exc_info=True,
            )
            return None

    else:
        # Unrecognized prefix — nothing we can do with it.
        logger.error(f"Unknown tool source format: {source}")
        return None
|
|
1275
|
+
|
|
1276
|
+
async def _initialize_named_procedures(self):
    """
    Turn named-procedure registrations into callables inside the Lua sandbox.

    Each registration becomes a ProcedureCallable that Lua code can invoke
    directly, with checkpointing handled automatically.
    """
    if not self.registry or not self.registry.named_procedures:
        logger.debug("No named procedures to initialize")
        return

    from tactus.primitives.procedure_callable import ProcedureCallable

    for proc_name, proc_def in self.registry.named_procedures.items():
        try:
            func = proc_def["function"]
            logger.debug(
                f"Processing named procedure '{proc_name}': "
                f"function={func}, type={type(func)}"
            )

            wrapper = ProcedureCallable(
                name=proc_name,
                procedure_function=func,
                input_schema=proc_def["input_schema"],
                output_schema=proc_def["output_schema"],
                state_schema=proc_def["state_schema"],
                execution_context=self.execution_context,
                lua_sandbox=self.lua_sandbox,
            )

            # If a placeholder stub is already installed in Lua globals,
            # point its registry at the real callable so existing references
            # delegate correctly.
            try:
                stub = self.lua_sandbox.lua.globals()[proc_name]
                if stub and hasattr(stub, "registry"):
                    stub.registry[proc_name] = wrapper
            except (KeyError, AttributeError):
                pass  # no stub installed yet — nothing to patch

            # Overwrite the placeholder with the real callable.
            self.lua_sandbox.lua.globals()[proc_name] = wrapper

            logger.info(f"Registered named procedure: {proc_name}")
        except Exception as e:
            logger.error(
                f"Failed to initialize named procedure '{proc_name}': {e}",
                exc_info=True,
            )

    logger.info(f"Initialized {len(self.registry.named_procedures)} named procedure(s)")
|
|
1328
|
+
|
|
1329
|
+
async def _create_toolset_from_config(
    self, name: str, definition: Dict[str, Any]
) -> Optional[Any]:
    """
    Create toolset from YAML config definition.

    Supports toolset types:
    - plugin: Load from local Python files
    - lua: Lua function tools
    - mcp: Reference MCP server toolset
    - filtered: Filter tools from source toolset
    - combined: Merge multiple toolsets
    - builtin: Custom built-in toolset

    A definition with no 'type' is treated as a DSL-defined toolset and is
    handled via its 'tools' or 'use' fields instead.

    Args:
        name: Toolset name
        definition: Config dict with 'type' and type-specific fields

    Returns:
        AbstractToolset instance or None if creation fails
    """
    import re
    from pydantic_ai.toolsets import CombinedToolset

    toolset_type = definition.get("type")

    if toolset_type == "lua":
        # Lua function toolset — delegated entirely to the Lua adapter.
        try:
            from tactus.adapters.lua_tools import LuaToolsAdapter

            lua_adapter = LuaToolsAdapter(
                tool_primitive=self.tool_primitive, mock_manager=self.mock_manager
            )
            return lua_adapter.create_lua_toolset(name, definition)
        except ImportError as e:
            logger.error(f"Could not import LuaToolsAdapter: {e}")
            return None

    if toolset_type == "plugin":
        # Load tools from local Python files listed in 'paths'.
        paths = definition.get("paths", [])
        if not paths:
            logger.warning(f"Plugin toolset '{name}' has no paths configured")
            return None

        from tactus.adapters.plugins import PluginLoader

        plugin_loader = PluginLoader(tool_primitive=self.tool_primitive)
        return plugin_loader.create_toolset(paths, name=name)

    elif toolset_type == "mcp":
        # Reference MCP server by name.
        server_name = definition.get("server")
        if not server_name:
            logger.error(f"MCP toolset '{name}' missing 'server' field")
            return None

        # Return reference to MCP toolset (will be resolved after MCP init).
        return self.resolve_toolset(server_name)

    elif toolset_type == "filtered":
        # Filter tools from a source toolset by regex on the tool name.
        source_name = definition.get("source")
        pattern = definition.get("pattern")

        if not source_name:
            logger.error(f"Filtered toolset '{name}' missing 'source' field")
            return None

        source_toolset = self.resolve_toolset(source_name)
        if not source_toolset:
            logger.error(f"Filtered toolset '{name}' cannot find source '{source_name}'")
            return None

        if pattern:
            # re.match anchors at the start of the name; a truthy Match
            # object keeps the tool, None drops it.
            return source_toolset.filtered(lambda ctx, tool: re.match(pattern, tool.name))
        else:
            # No pattern given — pass the source through unchanged.
            logger.warning(f"Filtered toolset '{name}' has no filter pattern")
            return source_toolset

    elif toolset_type == "combined":
        # Merge multiple toolsets; missing sources are skipped with a warning.
        sources = definition.get("sources", [])
        if not sources:
            logger.warning(f"Combined toolset '{name}' has no sources")
            return None

        toolsets = []
        for source_name in sources:
            source = self.resolve_toolset(source_name)
            if source:
                toolsets.append(source)
            else:
                logger.warning(f"Combined toolset '{name}' cannot find source '{source_name}'")

        if toolsets:
            return CombinedToolset(toolsets)
        else:
            logger.error(f"Combined toolset '{name}' has no valid sources")
            return None

    elif toolset_type == "builtin":
        # Custom built-in toolset (for future extension).
        logger.warning(f"Builtin toolset type for '{name}' not yet implemented")
        return None

    else:
        # Check if this is a DSL-defined toolset (no explicit type).
        # DSL toolsets can have:
        # - "tools" field with list of tool names or inline tool definitions
        # - "use" field to import from a file or other source

        if "tools" in definition:
            # Handle tools list (can be tool names or inline definitions).
            tools_list = definition["tools"]

            # Check if we have inline tool definitions (dicts with a Lua
            # handler). The `1 in item` check covers Lua array-style tables
            # where the handler is the first positional element.
            has_inline_tools = False
            if isinstance(tools_list, list):
                for item in tools_list:
                    if isinstance(item, dict) and (
                        "handler" in item or (1 in item and callable(item.get(1)))
                    ):
                        has_inline_tools = True
                        break

            if has_inline_tools:
                # Create toolset from inline Lua tools.
                try:
                    from tactus.adapters.lua_tools import LuaToolsAdapter

                    lua_adapter = LuaToolsAdapter(
                        tool_primitive=self.tool_primitive, mock_manager=self.mock_manager
                    )

                    # Create a toolset from inline tool definitions.
                    return lua_adapter.create_inline_toolset(name, tools_list)
                except Exception as e:
                    logger.error(
                        f"Failed to create inline toolset '{name}': {e}", exc_info=True
                    )
                    return None
            else:
                # Tools list contains tool names - create a combined toolset.
                from pydantic_ai.toolsets import CombinedToolset

                resolved_tools = []
                for tool_name in tools_list:
                    # Try to resolve each tool by name via the registry.
                    tool = self.resolve_toolset(tool_name)
                    if tool:
                        resolved_tools.append(tool)
                    else:
                        logger.warning(f"Tool '{tool_name}' not found for toolset '{name}'")

                if resolved_tools:
                    return CombinedToolset(resolved_tools)
                else:
                    logger.error(f"No valid tools found for toolset '{name}'")
                    return None

        elif "use" in definition:
            # Import toolset from external source.
            source = definition["use"]

            # Handle different source types.
            if source.startswith("./") or source.endswith(".tac"):
                # Import from local .tac file.
                from pathlib import Path

                # Resolve path relative to the source file if available.
                if self.source_file_path and source.startswith("./"):
                    base_dir = Path(self.source_file_path).parent
                    file_path = base_dir / source[2:]  # Remove "./" prefix
                else:
                    file_path = Path(source)

                # Check if file exists.
                if not file_path.exists():
                    logger.error(f"Toolset source file not found: {file_path}")
                    return None

                if not file_path.suffix == ".tac":
                    logger.error(f"Toolset source must be a .tac file: {file_path}")
                    return None

                # For now, log a warning that this is partially implemented.
                # In a full implementation, we would:
                # 1. Parse the .tac file
                # 2. Extract all Tool and Toolset definitions
                # 3. Create a combined toolset with all tools from the file
                logger.warning(
                    f"Toolset import from .tac file '{file_path}' is partially implemented. "
                    "Currently returns empty toolset. Full implementation would extract all "
                    "tools and toolsets from the file."
                )

                # Return an empty toolset for now.
                from pydantic_ai.toolsets import FunctionToolset

                return FunctionToolset(tools=[])

            elif source.startswith("mcp."):
                # Reference MCP server.
                server_name = source[4:]  # Remove "mcp." prefix
                return self.resolve_toolset(server_name)
            else:
                logger.error(f"Unknown toolset source '{source}' for '{name}'")
                return None

        else:
            logger.error(f"Toolset '{name}' has neither 'type', 'tools', nor 'use' field")
            return None
def _parse_toolset_expressions(self, expressions: list) -> list:
    """
    Parse toolset expressions from agent config.

    Supports:
    - Simple string: "financial" -> entire toolset
    - Filter dict: {name = "plexus", include = ["score_info"]}
    - Exclude dict: {name = "web", exclude = ["admin"]}
    - Prefix dict: {name = "web", prefix = "search_"}
    - Rename dict: {name = "tools", rename = {old = "new"}}

    Args:
        expressions: List of toolset references or transformation dicts

    Returns:
        List of AbstractToolset instances

    Raises:
        ValueError: If a referenced toolset cannot be resolved or an
            expression is malformed.
    """
    result = []

    for expr in expressions:
        if isinstance(expr, str):
            # Simple reference - resolve by name.
            logger.debug(
                f"Resolving toolset '{expr}' from registry with {len(self.toolset_registry)} entries"
            )
            toolset = self.resolve_toolset(expr)
            if toolset is None:
                logger.error(f"Toolset '{expr}' not found in registry")
                raise ValueError(f"Toolset '{expr}' not found")
            result.append(toolset)

        elif isinstance(expr, dict):
            # Transformation expression.
            name = expr.get("name")
            if not name:
                raise ValueError(f"Toolset expression missing 'name': {expr}")

            toolset = self.resolve_toolset(name)
            if toolset is None:
                raise ValueError(f"Toolset '{name}' not found")

            # Apply transformations in order.
            #
            # BUGFIX: the filter predicates bind their name set through a
            # default argument (names=...) instead of closing over the
            # shared local `tool_names`. A plain closure is late-bound, so
            # when both 'include' and 'exclude' were present (or across
            # loop iterations) every filter saw whichever set was assigned
            # LAST, silently filtering with the wrong names.
            if "include" in expr:
                # Filter to specific tools.
                tool_names = set(expr["include"])
                toolset = toolset.filtered(
                    lambda ctx, tool, names=frozenset(tool_names): tool.name in names
                )
                logger.debug(f"Applied include filter to toolset '{name}': {tool_names}")

            if "exclude" in expr:
                # Exclude specific tools.
                tool_names = set(expr["exclude"])
                toolset = toolset.filtered(
                    lambda ctx, tool, names=frozenset(tool_names): tool.name not in names
                )
                logger.debug(f"Applied exclude filter to toolset '{name}': {tool_names}")

            if "prefix" in expr:
                # Add prefix to tool names.
                prefix = expr["prefix"]
                toolset = toolset.prefixed(prefix)
                logger.debug(f"Applied prefix '{prefix}' to toolset '{name}'")

            if "rename" in expr:
                # Rename tools.
                rename_map = expr["rename"]
                toolset = toolset.renamed(rename_map)
                logger.debug(f"Applied rename to toolset '{name}': {rename_map}")

            result.append(toolset)
        else:
            raise ValueError(f"Invalid toolset expression: {expr} (type: {type(expr)})")

    return result
async def _initialize_dependencies(self):
    """Initialize user-declared dependencies from registry.

    Builds a config dict from the registry's dependency declarations,
    creates every resource via ResourceFactory, and registers each one
    with a ResourceManager (stored on self.dependency_manager) for later
    cleanup. Created instances are stored in self.user_dependencies.

    Raises:
        RuntimeError: If any dependency fails to initialize. The original
            exception is chained as the cause.
    """
    # Only initialize if registry exists and has dependencies.
    if not self.registry or not self.registry.dependencies:
        logger.debug("No dependencies declared in procedure")
        return

    logger.info(f"Initializing {len(self.registry.dependencies)} dependencies")

    # Import dependency infrastructure.
    from tactus.core.dependencies import ResourceFactory, ResourceManager

    # Create ResourceManager for lifecycle management.
    self.dependency_manager = ResourceManager()

    # Build config dict for ResourceFactory.
    dependencies_config = {
        dep_name: dep_decl.config
        for dep_name, dep_decl in self.registry.dependencies.items()
    }

    try:
        # Create all dependencies.
        self.user_dependencies = await ResourceFactory.create_all(dependencies_config)

        # Register with manager for cleanup.
        for dep_name, dep_instance in self.user_dependencies.items():
            await self.dependency_manager.add_resource(dep_name, dep_instance)

        logger.info(
            f"Successfully initialized dependencies: {list(self.user_dependencies.keys())}"
        )

    except Exception as e:
        logger.error(f"Failed to initialize dependencies: {e}")
        # BUGFIX: chain the original exception (`from e`) so the real
        # cause and its traceback are preserved; the previous re-raise
        # discarded them.
        raise RuntimeError(f"Dependency initialization failed: {e}") from e
async def _setup_agents(self, context: Dict[str, Any]):
    """
    Setup agent primitives with LLMs and tools using Pydantic AI.

    For each entry in the 'agents' config that was not already created
    during parsing, resolves provider/model, toolsets (explicit, default,
    or all registered), inline Lua tools, and structured output schema,
    then creates a DSPy-based agent and stores it in self.agents.

    Args:
        context: Procedure context with pre-loaded data
    """
    logger.info(
        f"_setup_agents called. Toolset registry has {len(self.toolset_registry)} toolsets: {list(self.toolset_registry.keys())}"
    )

    # Initialize user dependencies first (needed by agents)
    await self._initialize_dependencies()

    # Get agent configurations
    agents_config = self.config.get("agents", {})

    if not agents_config:
        logger.info("No agents defined in configuration - skipping agent setup")
        return

    # Import DSPy agent primitive (required)
    from tactus.dspy.agent import create_dspy_agent

    logger.info("Using DSPy-based Agent implementation")

    # Get default toolsets from config (for agents that don't specify toolsets)
    default_toolset_names = self.config.get("default_toolsets", [])
    if default_toolset_names:
        logger.info(f"Default toolsets configured: {default_toolset_names}")

    # Setup each agent
    for agent_name, agent_config in agents_config.items():
        # Skip if agent was already created during immediate initialization
        if agent_name in self.agents:
            logger.debug(
                f"Agent '{agent_name}' already created during parsing - skipping setup"
            )
            continue

        logger.info(f"Setting up agent: {agent_name}")

        # Get agent prompts (initial_message needs template processing,
        # system_prompt is dynamic). Note: a missing 'system_prompt' key
        # raises KeyError here — it is effectively required.
        system_prompt_template = agent_config[
            "system_prompt"
        ]  # Keep as template for dynamic rendering

        # initial_message is optional - if not provided, will default to empty string or manual injection
        initial_message_raw = agent_config.get("initial_message", "")
        initial_message = (
            self._process_template(initial_message_raw, context) if initial_message_raw else ""
        )

        # Provider is required - no defaults
        provider_name = agent_config.get("provider") or self.config.get("default_provider")
        if not provider_name:
            raise ValueError(
                f"Agent '{agent_name}' must specify a 'provider' (either on the agent or as 'default_provider' in the procedure)"
            )

        # Handle model - can be string or dict with settings
        model_config = agent_config.get("model") or self.config.get("default_model") or "gpt-4o"
        model_settings = None

        if isinstance(model_config, dict):
            # Model is a dict with name and settings.
            # NOTE(review): if the dict lacks a 'name' key, model_id is None
            # and the `":" in model_id` check below raises TypeError — TODO confirm
            # whether upstream validation guarantees 'name'.
            model_id = model_config.get("name")
            # Extract settings (everything except 'name')
            model_settings = {k: v for k, v in model_config.items() if k != "name"}
            if model_settings:
                logger.info(f"Agent '{agent_name}' using model settings: {model_settings}")
        else:
            # Model is a simple string
            model_id = model_config

        # If model_id has a provider prefix AND no explicit provider was set, extract it
        if (
            ":" in model_id
            and not agent_config.get("provider")
            and not self.config.get("default_provider")
        ):
            prefix, model_id = model_id.split(":", 1)
            provider_name = prefix

        # Construct the full model string for pydantic-ai
        model_name = f"{provider_name}:{model_id}"

        logger.info(
            f"Agent '{agent_name}' using provider '{provider_name}' with model '{model_id}'"
        )

        # Handle inline Lua function tools (agent.inline_tools)
        inline_tools_toolset = None
        if "inline_tools" in agent_config and agent_config["inline_tools"]:
            tools_spec = agent_config["inline_tools"]
            # These are inline tool definitions (dicts with 'handler' key).
            # The `1 in t` check covers Lua array-style tables where the
            # handler is the first positional element.
            if isinstance(tools_spec, list):
                inline_tool_specs = [
                    t
                    for t in tools_spec
                    if isinstance(t, dict)
                    and ("handler" in t or (1 in t and callable(t.get(1))))
                ]
                if inline_tool_specs:
                    # These are inline Lua function tools
                    try:
                        from tactus.adapters.lua_tools import LuaToolsAdapter

                        lua_adapter = LuaToolsAdapter(
                            tool_primitive=self.tool_primitive, mock_manager=self.mock_manager
                        )
                        inline_tools_toolset = lua_adapter.create_inline_tools_toolset(
                            agent_name, inline_tool_specs
                        )
                        logger.info(
                            f"Agent '{agent_name}' has {len(inline_tool_specs)} inline Lua tools"
                        )
                    except ImportError as e:
                        logger.error(
                            f"Could not import LuaToolsAdapter for agent '{agent_name}': {e}"
                        )

        # Get tools (tool/toolset references) for this agent.
        # Use a sentinel value to distinguish "not present" from "present but None/empty"
        _MISSING = object()
        agent_tools_config = agent_config.get("tools", _MISSING)

        # Debug log
        logger.debug(
            f"Agent '{agent_name}' raw tools config: {agent_tools_config}, type: {type(agent_tools_config)}"
        )

        # Convert Lua table to Python list if needed
        if (
            agent_tools_config is not _MISSING
            and agent_tools_config is not None
            and hasattr(agent_tools_config, "__len__")
        ):
            try:
                # Try to convert Lua table to list (Lua tables expose
                # .values(); plain sequences convert directly)
                agent_tools_config = (
                    list(agent_tools_config.values())
                    if hasattr(agent_tools_config, "values")
                    else list(agent_tools_config)
                )
                logger.debug(f"Agent '{agent_name}' converted tools to: {agent_tools_config}")
            except (TypeError, AttributeError):
                # If conversion fails, leave as-is
                pass

        if agent_tools_config is _MISSING:
            # No tools key present - use default toolsets if configured, otherwise all
            if default_toolset_names:
                filtered_toolsets = self._parse_toolset_expressions(default_toolset_names)
                logger.info(
                    f"Agent '{agent_name}' using default toolsets: {default_toolset_names}"
                )
            else:
                # No defaults configured - use all available toolsets from registry
                filtered_toolsets = list(self.toolset_registry.values())
                logger.info(
                    f"Agent '{agent_name}' using all available toolsets (no defaults configured)"
                )
        elif isinstance(agent_tools_config, list) and len(agent_tools_config) == 0:
            # Explicitly empty list - no tools.
            # Use None instead of [] to completely disable tool calling for Bedrock models
            filtered_toolsets = None
            logger.info(f"Agent '{agent_name}' has NO tools (explicitly empty - passing None)")
        else:
            # Parse toolset expressions
            logger.info(f"Agent '{agent_name}' raw tools config: {agent_tools_config}")
            filtered_toolsets = self._parse_toolset_expressions(agent_tools_config)
            logger.info(f"Agent '{agent_name}' parsed toolsets: {filtered_toolsets}")

        # Append inline tools toolset if present
        if inline_tools_toolset:
            if filtered_toolsets is None:
                # Agent had no toolsets, create list with just inline tools
                filtered_toolsets = [inline_tools_toolset]
            else:
                # Append to existing toolsets
                filtered_toolsets.append(inline_tools_toolset)
            logger.debug(f"Added inline tools toolset to agent '{agent_name}'")

        # Legacy: Keep empty tools list for AgentPrimitive constructor
        filtered_tools = []

        # Handle structured output if specified
        output_schema = None  # Initialize for DSPy agent

        # Prefer output (aligned with pydantic-ai)
        if agent_config.get("output"):
            try:
                # Built for validation only; the model class itself is discarded
                self._create_pydantic_model_from_output(
                    agent_config["output"], f"{agent_name}Output"
                )
                logger.info(f"Using agent output schema for '{agent_name}'")
                # Also set output_schema for DSPy compatibility
                output_schema = agent_config["output"]
            except Exception as e:
                logger.warning(f"Failed to create output model from output: {e}")
        elif agent_config.get("output_schema"):
            # Fallback to output_schema for backward compatibility
            output_schema = agent_config["output_schema"]
            try:
                self._create_output_model_from_schema(output_schema, f"{agent_name}Output")
                logger.info(f"Created structured output model for agent '{agent_name}'")
            except Exception as e:
                logger.warning(f"Failed to create output model for agent '{agent_name}': {e}")
        elif self.config.get("output"):
            # Procedure-level output schemas apply to procedures, not agents.
            # Only use them as a fallback for agent structured output when they are
            # object-shaped (i.e., a dict of fields). Scalar procedure outputs
            # (e.g., `output = field.string{...}`) are not agent output schemas.
            procedure_output_schema = self.config["output"]
            if (
                isinstance(procedure_output_schema, dict)
                and "type" not in procedure_output_schema
            ):
                output_schema = procedure_output_schema
                try:
                    self._create_output_model_from_schema(output_schema, f"{agent_name}Output")
                    logger.info(f"Using procedure-level output schema for agent '{agent_name}'")
                except Exception as e:
                    logger.warning(f"Failed to create output model from procedure schema: {e}")

        # Extract message history filter if configured.
        # NOTE(review): message_history_filter is computed and logged but not
        # passed into dspy_config below — confirm whether that is intentional.
        message_history_filter = None
        if agent_config.get("message_history"):
            message_history_config = agent_config["message_history"]
            if isinstance(message_history_config, dict) and "filter" in message_history_config:
                message_history_filter = message_history_config["filter"]
                logger.info(
                    f"Agent '{agent_name}' has message history filter: {message_history_filter}"
                )

        # Create DSPy-based agent. Settings from a model dict take precedence
        # over agent-level settings for temperature/max_tokens/model_type.
        dspy_config = {
            "system_prompt": system_prompt_template,
            "model": model_name,
            "provider": agent_config.get("provider"),
            "tools": filtered_tools,
            "toolsets": filtered_toolsets,
            "output_schema": output_schema,
            "temperature": (
                model_settings.get("temperature", 0.7)
                if model_settings
                else agent_config.get("temperature", 0.7)
            ),
            "max_tokens": (
                model_settings.get("max_tokens")
                if model_settings
                else agent_config.get("max_tokens")
            ),
            "model_type": (
                model_settings.get("model_type")
                if model_settings
                else agent_config.get("model_type")
            ),
            "disable_streaming": agent_config.get("disable_streaming", False),
            "initial_message": initial_message,
            "log_handler": self.log_handler,
        }

        # Create DSPy agent with registry and mock_manager for mock support
        agent_primitive = create_dspy_agent(
            agent_name,
            dspy_config,
            registry=self.registry,
            mock_manager=self.mock_manager,
        )

        # Store additional context for compatibility
        agent_primitive._tool_primitive = self.tool_primitive
        agent_primitive._state_primitive = self.state_primitive
        agent_primitive._context = context

        self.agents[agent_name] = agent_primitive

        logger.info(f"Agent '{agent_name}' configured successfully with model '{model_name}'")
async def _setup_models(self):
    """
    Setup model primitives for ML inference.

    Creates ModelPrimitive instances for each model declaration
    and stores them in self.models dict.
    """
    # Get model configurations from registry; nothing to do without them.
    if not self.registry or not self.registry.models:
        logger.debug("No models defined in configuration - skipping model setup")
        return

    from tactus.primitives.model import ModelPrimitive

    # Build one primitive per declared model; any failure aborts setup.
    for model_name, model_config in self.registry.models.items():
        logger.info(f"Setting up model: {model_name}")

        try:
            primitive = ModelPrimitive(
                model_name=model_name,
                config=model_config,
                context=self.execution_context,
                mock_manager=self.mock_manager,
            )
        except Exception as e:
            logger.error(f"Failed to setup model '{model_name}': {e}")
            raise

        self.models[model_name] = primitive
        logger.info(f"Model '{model_name}' configured successfully")
def _create_pydantic_model_from_output(self, output_schema, model_name: str) -> type:
    """
    Convert output schema to Pydantic model.

    Aligned with pydantic-ai's output parameter.

    Args:
        output_schema: AgentOutputSchema or dict with field definitions
        model_name: Name for the generated Pydantic model

    Returns:
        Dynamically created Pydantic model class
    """
    from pydantic import create_model
    from typing import Optional

    # AgentOutputSchema exposes `.fields`; a plain dict IS the field mapping.
    schema_fields = getattr(output_schema, "fields", output_schema)

    field_specs = {}
    for field_name, field_def in schema_fields.items():
        if hasattr(field_def, "type"):
            # FieldDefinition-style object with attributes.
            type_name = field_def.type
            is_required = getattr(field_def, "required", True)
        else:
            # Fields from registry are plain dicts (FieldDefinition type is
            # lost); trust that they were created with field builders.
            type_name = field_def.get("type", "string")
            is_required = field_def.get("required", True)

        # Required fields use Ellipsis (no default); optional fields become
        # Optional[...] defaulting to None.
        python_type = self._map_type_string(type_name)
        field_specs[field_name] = (
            (python_type, ...) if is_required else (Optional[python_type], None)
        )

    return create_model(model_name, **field_specs)
def _map_type_string(self, type_str: str) -> type:
|
|
2015
|
+
"""Map type string to Python type."""
|
|
2016
|
+
type_map = {
|
|
2017
|
+
"string": str,
|
|
2018
|
+
"str": str,
|
|
2019
|
+
"number": float,
|
|
2020
|
+
"float": float,
|
|
2021
|
+
"integer": int,
|
|
2022
|
+
"int": int,
|
|
2023
|
+
"boolean": bool,
|
|
2024
|
+
"bool": bool,
|
|
2025
|
+
"object": dict,
|
|
2026
|
+
"dict": dict,
|
|
2027
|
+
"array": list,
|
|
2028
|
+
"list": list,
|
|
2029
|
+
}
|
|
2030
|
+
return type_map.get(type_str.lower(), str)
|
|
2031
|
+
|
|
2032
|
+
def _create_output_model_from_schema(
    self, output_schema: Dict[str, Any], model_name: str = "OutputModel"
) -> type:
    """
    Create a Pydantic model from output schema definition.

    Args:
        output_schema: Dictionary mapping field names to field definitions
            (plain dicts produced by the field builders; the FieldDefinition
            type is lost when stored in the registry).
        model_name: Name for the generated model.

    Returns:
        Pydantic model class.
    """
    from pydantic import create_model, Field  # noqa: F401

    # Map type strings to Python types.
    # Hoisted out of the loop below: this mapping is loop-invariant and was
    # previously rebuilt for every field.
    type_mapping = {
        "string": str,
        "integer": int,
        "number": float,
        "boolean": bool,
        "array": list,
        "object": dict,
    }

    fields = {}
    for field_name, field_def in output_schema.items():
        # Fields from registry are plain dicts (FieldDefinition type is lost);
        # trust that they were created with field builders.
        field_type_str = field_def.get("type", "string")
        is_required = field_def.get("required", False)
        python_type = type_mapping.get(field_type_str, str)

        # Create Field with description if available; required fields use the
        # Ellipsis sentinel, optional fields carry their declared default.
        description = field_def.get("description", "")
        if is_required:
            field = (
                Field(..., description=description) if description else Field(...)  # noqa: F821
            )
        else:
            default = field_def.get("default", None)
            field = (
                Field(default=default, description=description)  # noqa: F821
                if description
                else Field(default=default)  # noqa: F821
            )

        fields[field_name] = (python_type, field)

    return create_model(model_name, **fields)  # noqa: F821
def _enhance_handles(self):
    """
    Connect DSL handles to their actual primitives (fallback for handles not already connected).

    With immediate agent creation, most handles are already connected during parsing.
    This method now serves as a fallback for:
    - Handles created before runtime context was available
    - Model handles (which may still use two-phase initialization)
    - Cases where immediate creation failed

    This is called from _inject_primitives() after all primitives are ready.
    """
    from tactus.primitives.handles import AgentHandle, ModelHandle

    # Get registries (stored during _parse_dsl_source); without them there is
    # nothing to wire up, so bail out quietly.
    if not hasattr(self, "_dsl_registries"):
        logger.debug("No DSL registries found - skipping handle enhancement")
        return

    agent_registry = self._dsl_registries.get("agent", {})
    model_registry = self._dsl_registries.get("model", {})

    # Enhance agent handles (only if not already connected).
    # Counters drive the summary log message at the end of this method.
    enhanced_count = 0
    execution_context_updated_count = 0
    for agent_name, primitive in self.agents.items():
        if agent_name in agent_registry:
            handle = agent_registry[agent_name]
            if isinstance(handle, AgentHandle):
                # Only enhance if not already connected
                if handle._primitive is None:
                    # Agent handles also receive the execution context (unlike
                    # model handles below, which take only the primitive).
                    handle._set_primitive(primitive, self.execution_context)
                    logger.info(f"Enhanced AgentHandle '{agent_name}' (fallback)")
                    enhanced_count += 1
                else:
                    # For immediate agents: primitive is already connected but execution_context might be None
                    # Update execution_context if needed (it wasn't available during parsing)
                    if handle._execution_context is None and self.execution_context is not None:
                        handle._execution_context = self.execution_context
                        logger.info(
                            f"[CHECKPOINT] Updated execution_context for immediately-created agent '{agent_name}', handle id={id(handle)}, handle in Lua globals={agent_name in self.lua_sandbox.lua.globals()}"
                        )
                        execution_context_updated_count += 1
                    else:
                        logger.debug(f"AgentHandle '{agent_name}' already connected - skipping")
            else:
                # Registry entry exists but is the wrong type; warn and move on.
                logger.warning(
                    f"Agent registry entry '{agent_name}' is not an AgentHandle: {type(handle)}"
                )

    # Enhance model handles (only if not already connected)
    for model_name, primitive in self.models.items():
        if model_name in model_registry:
            handle = model_registry[model_name]
            if isinstance(handle, ModelHandle):
                if handle._primitive is None:
                    handle._set_primitive(primitive)
                    logger.info(f"Enhanced ModelHandle '{model_name}' (fallback)")
                    enhanced_count += 1
                else:
                    logger.debug(f"ModelHandle '{model_name}' already connected - skipping")
            else:
                logger.warning(
                    f"Model registry entry '{model_name}' is not a ModelHandle: {type(handle)}"
                )

    # Summarize: fallback connections take precedence in the log, then
    # context-only updates, otherwise everything was wired during parsing.
    if enhanced_count > 0:
        logger.debug(f"Handle enhancement (fallback) connected {enhanced_count} handles")
    elif execution_context_updated_count > 0:
        logger.info(
            f"[CHECKPOINT] Updated execution_context for {execution_context_updated_count} immediately-created agents"
        )
    else:
        logger.debug("All handles already connected during parsing")
def _inject_primitives(self):
    """
    Inject all primitives into Lua global scope.

    Runs after primitives are constructed and before workflow execution:
    builds the ``input`` table (defaults, then context overrides, then enum
    validation), re-injects every configured primitive, installs the Lua
    ``checkpoint`` wrapper, and finally calls _enhance_handles() as a
    fallback wiring pass.

    Raises:
        ValueError: If an input value violates its declared enum constraint.
    """
    # Inject input with default values, then override with context values
    if "input" in self.config:
        input_config = self.config["input"]
        input_values = {}
        # Start with defaults
        for input_name, input_def in input_config.items():
            if isinstance(input_def, dict) and "default" in input_def:
                input_values[input_name] = input_def["default"]
        # Override with context values
        for input_name in input_config.keys():
            if input_name in self.context:
                input_values[input_name] = self.context[input_name]

        # Validate enum constraints
        for input_name, input_value in input_values.items():
            if input_name in input_config:
                input_def = input_config[input_name]
                if isinstance(input_def, dict) and "enum" in input_def and input_def["enum"]:
                    allowed_values = input_def["enum"]
                    if input_value not in allowed_values:
                        raise ValueError(
                            f"Input '{input_name}' has invalid value '{input_value}'. "
                            f"Allowed values: {allowed_values}"
                        )

        # Convert Python lists/dicts to Lua tables for proper array/object handling
        def convert_to_lua(value):
            """Recursively convert Python lists and dicts to Lua tables."""
            if isinstance(value, list):
                # Convert Python list to Lua table (1-indexed)
                lua_table = self.lua_sandbox.lua.table()
                for i, item in enumerate(value, 1):
                    lua_table[i] = convert_to_lua(item)
                return lua_table
            elif isinstance(value, dict):
                # Convert Python dict to Lua table
                lua_table = self.lua_sandbox.lua.table()
                for k, v in value.items():
                    lua_table[k] = convert_to_lua(v)
                return lua_table
            else:
                # Scalars pass through unchanged.
                return value

        # Convert all inputs, creating a new Lua table for the input object
        lua_input = self.lua_sandbox.lua.table()
        for key, value in input_values.items():
            lua_input[key] = convert_to_lua(value)

        self.lua_sandbox.set_global("input", lua_input)
        logger.info(f"Injected input into Lua sandbox: {input_values}")

    # Re-inject state primitive (may have been updated with schema)
    if self.state_primitive:
        # Replace the placeholder _state_primitive with the real one
        # (The metatable was already set up during parsing, so it will use this new primitive)
        self.lua_sandbox.inject_primitive("_state_primitive", self.state_primitive)
        logger.debug(
            "State primitive re-injected (metatable already configured during parsing)"
        )
    if self.iterations_primitive:
        self.lua_sandbox.inject_primitive("Iterations", self.iterations_primitive)
    if self.stop_primitive:
        self.lua_sandbox.inject_primitive("Stop", self.stop_primitive)
    if self.tool_primitive:
        self.lua_sandbox.inject_primitive("Tool", self.tool_primitive)
    if self.toolset_primitive:
        self.lua_sandbox.inject_primitive("Toolset", self.toolset_primitive)
        logger.info(f"Injecting Toolset primitive: {self.toolset_primitive}")

    # Inject checkpoint primitives
    if self.step_primitive:
        self.lua_sandbox.inject_primitive("Step", self.step_primitive)

        # Inject checkpoint as _python_checkpoint, then wrap it with Lua code
        # that captures source location using debug.getinfo
        self.lua_sandbox.inject_primitive("_python_checkpoint", self.step_primitive.checkpoint)

        # Create Lua wrapper that captures source location before calling Python
        self.lua_sandbox.lua.execute(
            """
            function checkpoint(fn)
                -- Capture caller's source location (2 levels up: this wrapper -> caller)
                local info = debug.getinfo(2, 'Sl')
                if info then
                    local source_info = {
                        file = info.source,
                        line = info.currentline or 0
                    }
                    -- Call Python checkpoint with source location
                    return _python_checkpoint(fn, source_info)
                else
                    -- Fallback if debug info not available
                    return _python_checkpoint(fn, nil)
                end
            end
            """
        )
        logger.debug("Checkpoint wrapper injected with Lua source location tracking")

    if self.checkpoint_primitive:
        self.lua_sandbox.inject_primitive("Checkpoint", self.checkpoint_primitive)
        logger.debug("Step and Checkpoint primitives injected")

    # Inject HITL primitives
    if self.human_primitive:
        logger.info(f"Injecting Human primitive: {self.human_primitive}")
        self.lua_sandbox.inject_primitive("Human", self.human_primitive)

    if self.log_primitive:
        logger.info(f"Injecting Log primitive: {self.log_primitive}")
        self.lua_sandbox.inject_primitive("Log", self.log_primitive)

    if self.message_history_primitive:
        logger.info(f"Injecting MessageHistory primitive: {self.message_history_primitive}")
        self.lua_sandbox.inject_primitive("MessageHistory", self.message_history_primitive)

    if self.json_primitive:
        logger.info(f"Injecting Json primitive: {self.json_primitive}")
        self.lua_sandbox.inject_primitive("Json", self.json_primitive)

    if self.retry_primitive:
        logger.info(f"Injecting Retry primitive: {self.retry_primitive}")
        self.lua_sandbox.inject_primitive("Retry", self.retry_primitive)

    if self.file_primitive:
        logger.info(f"Injecting File primitive: {self.file_primitive}")
        self.lua_sandbox.inject_primitive("File", self.file_primitive)

    if self.procedure_primitive:
        logger.info(f"Injecting Procedure primitive: {self.procedure_primitive}")
        self.lua_sandbox.inject_primitive("Procedure", self.procedure_primitive)

    if self.system_primitive:
        logger.info(f"Injecting System primitive: {self.system_primitive}")
        self.lua_sandbox.inject_primitive("System", self.system_primitive)

    if self.host_primitive:
        logger.info(f"Injecting Host primitive: {self.host_primitive}")
        self.lua_sandbox.inject_primitive("Host", self.host_primitive)

    # Inject Sleep function
    def sleep_wrapper(seconds):
        """Sleep for specified number of seconds."""
        logger.info(f"Sleep({seconds}) - pausing execution")
        time.sleep(seconds)
        logger.info(f"Sleep({seconds}) - resuming execution")

    self.lua_sandbox.set_global("Sleep", sleep_wrapper)
    logger.info("Injected Sleep function")

    # NOTE: Agent and model primitives are NO LONGER auto-injected with capitalized names.
    # Instead, use the new syntax:
    #   agent "greeter" { config }   -- define
    #   Agent("greeter")()           -- lookup and call
    # Or assign during definition:
    #   Greeter = agent "greeter" { config }
    #   Greeter()                    -- callable syntax

    # Enhance DSL handles to connect them to actual primitives
    self._enhance_handles()

    logger.debug("All primitives injected into Lua sandbox")
def _execute_workflow(self) -> Any:
    """
    Execute the Lua procedure code.

    Looks for a named 'main' procedure first; if none exists, falls back to
    a previously captured top-level execution result, and finally to the
    legacy YAML 'procedure' code string.

    Returns:
        Result from Lua procedure execution (Lua tables are converted to
        Python dicts for the 'main' path).

    Raises:
        LuaSandboxError: If the named 'main' procedure (or legacy code) fails.
        RuntimeError: If a registry exists but neither a 'main' procedure nor
            a top-level result is available.
    """
    if self.registry:
        # Check for named 'main' procedure first
        if "main" in self.registry.named_procedures:
            logger.info("Executing named 'main' procedure")
            try:
                main_proc = self.registry.named_procedures["main"]

                logger.debug(
                    f"Executing main: function={main_proc['function']}, "
                    f"type={type(main_proc['function'])}"
                )

                # Create callable wrapper for main
                from tactus.primitives.procedure_callable import ProcedureCallable

                main_callable = ProcedureCallable(
                    name="main",
                    procedure_function=main_proc["function"],
                    input_schema=main_proc["input_schema"],
                    output_schema=main_proc["output_schema"],
                    state_schema=main_proc["state_schema"],
                    execution_context=self.execution_context,
                    lua_sandbox=self.lua_sandbox,
                    is_main=True,  # Main procedure is not checkpointed
                )

                # Gather input parameters from context, applying defaults
                input_params = {}
                for key, field_def in main_proc["input_schema"].items():
                    # Check context first
                    if hasattr(self, "context") and self.context and key in self.context:
                        input_params[key] = self.context[key]
                    # Apply default if available and not required
                    elif isinstance(field_def, dict) and "default" in field_def:
                        # Fields created by field builders will have proper structure
                        # We can't check FieldDefinition type here as it's lost during storage
                        input_params[key] = field_def["default"]
                    # If required and not in context, it will fail validation in ProcedureCallable

                logger.debug(f"Calling main with input_params: {input_params}")

                # Execute main procedure
                result = main_callable(input_params)

                # Convert Lua table result to Python dict if needed
                # Check for lupa table (not Python dict/list): lupa tables have
                # .items() but are neither dict nor list.
                if (
                    result is not None
                    and hasattr(result, "items")
                    and not isinstance(result, (dict, list))
                ):
                    result = lua_table_to_dict(result)

                logger.info("Named 'main' procedure execution completed successfully")
                return result
            except Exception as e:
                # Wrap any failure in LuaSandboxError so callers see one type.
                logger.error(f"Named 'main' procedure execution failed: {e}")
                raise LuaSandboxError(f"Named 'main' procedure execution failed: {e}")

        else:
            # No main procedure found - check if we have top-level execution result
            # (set when script-mode source ran top-level code during parsing).
            if hasattr(self, "_top_level_result") and self._top_level_result is not None:
                logger.info("No main Procedure found - using top-level execution result")
                return self._top_level_result
            else:
                raise RuntimeError("Named 'main' procedure not found in registry")

    # Legacy YAML: execute procedure code string
    procedure_code = self.config["procedure"]
    logger.debug(f"Executing legacy procedure code ({len(procedure_code)} bytes)")

    try:
        result = self.lua_sandbox.execute(procedure_code)
        logger.info("Legacy procedure execution completed successfully")
        return result

    except LuaSandboxError as e:
        logger.error(f"Legacy procedure execution failed: {e}")
        raise
def _maybe_transform_script_mode_source(self, source: str) -> str:
    """
    Transform "script mode" source into an implicit Procedure wrapper.

    Script mode allows:
        input { ... }
        output { ... }
        -- declarations (Agent/Tool/Mocks/etc.)
        -- executable code
        return {...}

    During parsing, the Lua chunk is executed to collect declarations, but agents
    are not yet wired to toolsets/LLMs. Without transformation, top-level code
    would execute too early. We split declaration blocks from executable code and
    wrap the executable portion into an implicit `Procedure { function(input) ... end }`.

    Returns:
        The transformed source, or the original source unchanged when an
        explicit Procedure exists, no script-mode markers are found, or there
        is no executable body to wrap.
    """
    import re

    # If an explicit Procedure exists (any syntax), do not transform.
    # Examples:
    #   Procedure { ... }
    #   main = Procedure { ... }
    #   Procedure "main" { ... }
    #   main = Procedure "main" { ... }
    if re.search(r"(?m)^\s*(?:[A-Za-z_][A-Za-z0-9_]*\s*=\s*)?Procedure\b", source):
        return source

    # Detect script mode by top-level input/output declarations OR a top-level `return`.
    # We intentionally treat simple "hello world" scripts as script-mode so agent/tool
    # calls don't execute during the parse/declaration phase.
    if not re.search(r"(?m)^\s*(input|output)\s*\{", source) and not re.search(
        r"(?m)^\s*return\b", source
    ):
        return source

    # Split into declaration prefix vs executable body.
    decl_lines: list[str] = []
    body_lines: list[str] = []

    # Once we enter executable code, everything stays in the body.
    in_body = False
    # Tracks unbalanced '{' in the declaration prefix so multi-line DSL
    # blocks are consumed whole.
    brace_depth = 0
    # When not None, we are inside a Lua long-bracket string [=*[ ... ]=*]
    # and the value holds the '=' run of the delimiter.
    long_string_eq: str | None = None

    # Lines starting with any DSL declaration keyword belong to the prefix.
    decl_start = re.compile(
        r"^\s*(?:"
        r"input|output|Mocks|Agent|Toolset|Tool|Model|Module|Signature|LM|Dependency|Prompt|"
        r"Specifications|Evaluation|Evaluations|"
        r"default_provider|default_model|return_prompt|error_prompt|status_prompt|async|"
        r"max_depth|max_turns"
        r")\b"
    )
    require_stmt = re.compile(r"^\s*(?:local\s+)?[A-Za-z_][A-Za-z0-9_]*\s*=\s*require\(")
    # Assignments whose right-hand side is a DSL constructor also count as
    # declarations (e.g. `multiply = Tool {...}`).
    assignment_decl = re.compile(
        r"^\s*[A-Za-z_][A-Za-z0-9_]*\s*=\s*(?:"
        r"Agent|Toolset|Tool|Model|Module|Signature|LM|Dependency|Prompt"
        r")\b"
    )

    long_string_open = re.compile(r"\[(=*)\[")

    for line in source.splitlines():
        if in_body:
            body_lines.append(line)
            continue

        stripped = line.strip()

        # If we're inside a Lua long-bracket string (e.g., Specification([[ ... ]]) / Specifications([[ ... ]]))
        # keep consuming lines as declarations until we see the closing delimiter.
        if long_string_eq is not None:
            decl_lines.append(line)
            if f"]{long_string_eq}]" in line:
                long_string_eq = None
            continue

        # If we're inside a declaration block, keep consuming until braces balance.
        added_to_decl = False
        if brace_depth > 0:
            decl_lines.append(line)
            added_to_decl = True
        elif stripped == "" or stripped.startswith("--"):
            # Blank lines and comments stay with the declaration prefix.
            decl_lines.append(line)
            added_to_decl = True
        elif decl_start.match(line) or assignment_decl.match(line) or require_stmt.match(line):
            decl_lines.append(line)
            added_to_decl = True
        else:
            # First non-declaration line: switch to body mode permanently.
            in_body = True
            body_lines.append(line)

        # Track Lua long-bracket strings opened in the declaration prefix (e.g. Specification([[...]])).
        # We only need a lightweight heuristic here; spec/eval blocks should be simple and well-formed.
        if added_to_decl:
            m = long_string_open.search(line)
            if m:
                eq = m.group(1)
                # If the opening and closing are on the same line, don't enter long-string mode.
                if f"]{eq}]" not in line[m.end() :]:
                    long_string_eq = eq

        # Update brace depth based on a lightweight heuristic (sufficient for DSL blocks).
        # This intentionally ignores Lua string/comment edge cases; declarations should be simple.
        brace_depth += line.count("{") - line.count("}")
        if brace_depth < 0:
            brace_depth = 0

    # If there is no executable code, nothing to wrap.
    if not any(line.strip() for line in body_lines):
        return source

    # Indent executable code inside the implicit procedure function.
    indented_body = "\n".join((" " + line) if line != "" else "" for line in body_lines)

    transformed = "\n".join(
        [
            *decl_lines,
            "",
            "Procedure {",
            " function(input)",
            indented_body,
            " end",
            "}",
            "",
        ]
    )

    return transformed
def _process_template(self, template: str, context: Dict[str, Any]) -> str:
|
|
2542
|
+
"""
|
|
2543
|
+
Process a template string with variable substitution.
|
|
2544
|
+
|
|
2545
|
+
Args:
|
|
2546
|
+
template: Template string with {variable} placeholders
|
|
2547
|
+
context: Context dict with variable values
|
|
2548
|
+
|
|
2549
|
+
Returns:
|
|
2550
|
+
Processed string with variables substituted
|
|
2551
|
+
"""
|
|
2552
|
+
try:
|
|
2553
|
+
# Build template variables from context (supports dot notation)
|
|
2554
|
+
from string import Formatter
|
|
2555
|
+
|
|
2556
|
+
class DotFormatter(Formatter):
|
|
2557
|
+
def get_field(self, field_name, args, kwargs):
|
|
2558
|
+
# Support dot notation like {params.topic}
|
|
2559
|
+
parts = field_name.split(".")
|
|
2560
|
+
obj = kwargs
|
|
2561
|
+
for part in parts:
|
|
2562
|
+
if isinstance(obj, dict):
|
|
2563
|
+
obj = obj.get(part, "")
|
|
2564
|
+
else:
|
|
2565
|
+
obj = getattr(obj, part, "")
|
|
2566
|
+
return obj, field_name
|
|
2567
|
+
|
|
2568
|
+
template_vars = {}
|
|
2569
|
+
|
|
2570
|
+
# Add context variables
|
|
2571
|
+
if context:
|
|
2572
|
+
template_vars.update(context)
|
|
2573
|
+
|
|
2574
|
+
# Add input from config with default values
|
|
2575
|
+
if "input" in self.config:
|
|
2576
|
+
input_config = self.config["input"]
|
|
2577
|
+
input_values = {}
|
|
2578
|
+
for input_name, input_def in input_config.items():
|
|
2579
|
+
if isinstance(input_def, dict) and "default" in input_def:
|
|
2580
|
+
input_values[input_name] = input_def["default"]
|
|
2581
|
+
template_vars["input"] = input_values
|
|
2582
|
+
|
|
2583
|
+
# Add state (for dynamic templates)
|
|
2584
|
+
if self.state_primitive:
|
|
2585
|
+
template_vars["state"] = self.state_primitive.all()
|
|
2586
|
+
|
|
2587
|
+
# Use dot-notation formatter
|
|
2588
|
+
formatter = DotFormatter()
|
|
2589
|
+
result = formatter.format(template, **template_vars)
|
|
2590
|
+
return result
|
|
2591
|
+
|
|
2592
|
+
except KeyError as e:
|
|
2593
|
+
logger.warning(f"Template variable {e} not found, using template as-is")
|
|
2594
|
+
return template
|
|
2595
|
+
|
|
2596
|
+
except Exception as e:
|
|
2597
|
+
logger.error(f"Error processing template: {e}")
|
|
2598
|
+
return template
|
|
2599
|
+
|
|
2600
|
+
def _format_output_schema_for_prompt(self) -> str:
|
|
2601
|
+
"""
|
|
2602
|
+
Format the output schema as guidance for LLM prompts.
|
|
2603
|
+
|
|
2604
|
+
Returns:
|
|
2605
|
+
Formatted string describing expected outputs
|
|
2606
|
+
"""
|
|
2607
|
+
outputs = self.config.get("output", {})
|
|
2608
|
+
if not outputs:
|
|
2609
|
+
return ""
|
|
2610
|
+
|
|
2611
|
+
lines = ["## Expected Output Format", ""]
|
|
2612
|
+
lines.append("This workflow must return a structured result with the following fields:")
|
|
2613
|
+
lines.append("")
|
|
2614
|
+
|
|
2615
|
+
# Format each output field
|
|
2616
|
+
for field_name, field_def in outputs.items():
|
|
2617
|
+
# Fields from registry are plain dicts (FieldDefinition type is lost)
|
|
2618
|
+
# Trust that they were created with field builders
|
|
2619
|
+
field_type = field_def.get("type", "any")
|
|
2620
|
+
is_required = field_def.get("required", False)
|
|
2621
|
+
description = field_def.get("description", "")
|
|
2622
|
+
|
|
2623
|
+
req_marker = "**REQUIRED**" if is_required else "*optional*"
|
|
2624
|
+
lines.append(f"- **{field_name}** ({field_type}) - {req_marker}")
|
|
2625
|
+
if description:
|
|
2626
|
+
lines.append(f" {description}")
|
|
2627
|
+
lines.append("")
|
|
2628
|
+
|
|
2629
|
+
lines.append(
|
|
2630
|
+
"Note: The workflow orchestration code will extract and format these values from your tool calls and actions."
|
|
2631
|
+
)
|
|
2632
|
+
|
|
2633
|
+
return "\n".join(lines)
|
|
2634
|
+
|
|
2635
|
+
def get_state(self) -> Dict[str, Any]:
    """Return the current procedure state (empty dict when no state primitive exists)."""
    primitive = self.state_primitive
    return primitive.all() if primitive else {}
def get_iteration_count(self) -> int:
    """Return the current iteration count (0 when iterations are not tracked)."""
    counter = self.iterations_primitive
    return counter.current() if counter else 0
def is_stopped(self) -> bool:
    """Report whether a stop was requested for this procedure."""
    stopper = self.stop_primitive
    return stopper.requested() if stopper else False
def _parse_declarations(
    self, source: str, tool_primitive: Optional[ToolPrimitive] = None
) -> ProcedureRegistry:
    """
    Execute .tac to collect declarations.

    Injects the DSL stubs into the shared Lua sandbox, runs the source so
    declarations self-register, then validates and returns the registry.

    Args:
        source: Lua DSL source code
        tool_primitive: Optional ToolPrimitive for creating callable ToolHandles

    Returns:
        ProcedureRegistry with all declarations

    Raises:
        TactusRuntimeError: If parsing or validation fails
    """
    builder = RegistryBuilder()

    # Use the existing sandbox so procedure functions have access to primitives
    sandbox = self.lua_sandbox

    # Build runtime context for immediate agent creation
    runtime_context = {
        "tool_primitive": tool_primitive,
        "registry": builder.registry,
        "mock_manager": self.mock_manager,
        "execution_context": self.execution_context,
        "log_handler": self.log_handler,
        "_created_agents": {},  # Will be populated during parsing
    }

    # Inject DSL stubs (pass tool_primitive, mock_manager, and runtime_context)
    stubs = create_dsl_stubs(
        builder, tool_primitive, mock_manager=self.mock_manager, runtime_context=runtime_context
    )

    # Register any agents that were created immediately during parsing
    # NOTE(review): this loop runs BEFORE sandbox.execute(source) below, so
    # "_created_agents" is presumably still empty here unless create_dsl_stubs
    # itself creates agents — confirm whether this registration should happen
    # after execution instead.
    created_agents = runtime_context.get("_created_agents", {})
    logger.info(
        f"[AGENT_REGISTRATION] Found {len(created_agents)} immediately-created agents: {list(created_agents.keys())}"
    )
    for agent_name, agent_primitive in created_agents.items():
        self.agents[agent_name] = agent_primitive
        logger.info(
            f"[AGENT_REGISTRATION] Registered immediately-created agent '{agent_name}' in runtime.agents"
        )

    # Store registries for later handle enhancement (consumed by _enhance_handles)
    self._dsl_registries = stubs.pop("_registries", {})

    # Extract the binding callback for assignment interception
    binding_callback = stubs.pop("_tactus_register_binding", None)

    for name, stub in stubs.items():
        sandbox.set_global(name, stub)

    # Enable assignment interception for new syntax (Phase B+)
    # This captures assignments like: multiply = Tool {...}
    if binding_callback:
        sandbox.setup_assignment_interception(binding_callback)

    # Execute file - declarations self-register
    # Also capture the result for top-level code execution (when no Procedure blocks exist)
    try:
        execution_result = sandbox.execute(source)
        # Store result for later use if there's no main procedure
        self._top_level_result = execution_result
    except LuaSandboxError as e:
        raise TactusRuntimeError(f"Failed to parse DSL: {e}")

    # Validate and return registry
    result = builder.validate()
    if not result.valid:
        error_messages = [f" - {err.message}" for err in result.errors]
        raise TactusRuntimeError("DSL validation failed:\n" + "\n".join(error_messages))

    for warning in result.warnings:
        logger.warning(warning.message)

    logger.debug(f"Registry after parsing: lua_tools={list(result.registry.lua_tools.keys())}")
    return result.registry
def _registry_to_config(self, registry: ProcedureRegistry) -> Dict[str, Any]:
|
|
2736
|
+
"""
|
|
2737
|
+
Convert registry to config dict format.
|
|
2738
|
+
|
|
2739
|
+
Args:
|
|
2740
|
+
registry: ProcedureRegistry
|
|
2741
|
+
|
|
2742
|
+
Returns:
|
|
2743
|
+
Config dict
|
|
2744
|
+
"""
|
|
2745
|
+
config = {}
|
|
2746
|
+
|
|
2747
|
+
if registry.description:
|
|
2748
|
+
config["description"] = registry.description
|
|
2749
|
+
|
|
2750
|
+
# Convert input schema
|
|
2751
|
+
if registry.input_schema:
|
|
2752
|
+
config["input"] = registry.input_schema
|
|
2753
|
+
|
|
2754
|
+
# Convert output schema
|
|
2755
|
+
if registry.output_schema:
|
|
2756
|
+
config["output"] = registry.output_schema
|
|
2757
|
+
|
|
2758
|
+
# Convert state schema
|
|
2759
|
+
if registry.state_schema:
|
|
2760
|
+
config["state"] = registry.state_schema
|
|
2761
|
+
|
|
2762
|
+
# Convert agents
|
|
2763
|
+
if registry.agents:
|
|
2764
|
+
config["agents"] = {}
|
|
2765
|
+
for name, agent in registry.agents.items():
|
|
2766
|
+
config["agents"][name] = {
|
|
2767
|
+
"provider": agent.provider,
|
|
2768
|
+
"model": agent.model,
|
|
2769
|
+
"system_prompt": agent.system_prompt,
|
|
2770
|
+
# Tools control tool calling availability (tool/toolset references + expressions)
|
|
2771
|
+
# Keep empty list as [] (not None) to preserve "explicitly no tools" intent
|
|
2772
|
+
"tools": agent.tools,
|
|
2773
|
+
"max_turns": agent.max_turns,
|
|
2774
|
+
"disable_streaming": agent.disable_streaming,
|
|
2775
|
+
}
|
|
2776
|
+
# Include model configuration parameters if present
|
|
2777
|
+
if agent.temperature is not None:
|
|
2778
|
+
config["agents"][name]["temperature"] = agent.temperature
|
|
2779
|
+
if agent.max_tokens is not None:
|
|
2780
|
+
config["agents"][name]["max_tokens"] = agent.max_tokens
|
|
2781
|
+
if agent.model_type is not None:
|
|
2782
|
+
config["agents"][name]["model_type"] = agent.model_type
|
|
2783
|
+
# Include inline tool definitions if present
|
|
2784
|
+
if agent.inline_tools:
|
|
2785
|
+
config["agents"][name]["inline_tools"] = agent.inline_tools
|
|
2786
|
+
if agent.initial_message:
|
|
2787
|
+
config["agents"][name]["initial_message"] = agent.initial_message
|
|
2788
|
+
if agent.output:
|
|
2789
|
+
config["agents"][name]["output_schema"] = {
|
|
2790
|
+
field_name: {
|
|
2791
|
+
"type": (
|
|
2792
|
+
field.field_type.value
|
|
2793
|
+
if hasattr(field.field_type, "value")
|
|
2794
|
+
else field.field_type
|
|
2795
|
+
),
|
|
2796
|
+
"required": field.required,
|
|
2797
|
+
}
|
|
2798
|
+
for field_name, field in agent.output.fields.items()
|
|
2799
|
+
}
|
|
2800
|
+
if agent.message_history:
|
|
2801
|
+
config["agents"][name]["message_history"] = {
|
|
2802
|
+
"source": agent.message_history.source,
|
|
2803
|
+
"filter": agent.message_history.filter,
|
|
2804
|
+
}
|
|
2805
|
+
|
|
2806
|
+
# Convert HITL points
|
|
2807
|
+
if registry.hitl_points:
|
|
2808
|
+
config["hitl"] = {}
|
|
2809
|
+
for name, hitl in registry.hitl_points.items():
|
|
2810
|
+
config["hitl"][name] = {
|
|
2811
|
+
"type": hitl.hitl_type,
|
|
2812
|
+
"message": hitl.message,
|
|
2813
|
+
}
|
|
2814
|
+
if hitl.timeout:
|
|
2815
|
+
config["hitl"][name]["timeout"] = hitl.timeout
|
|
2816
|
+
if hitl.default is not None:
|
|
2817
|
+
config["hitl"][name]["default"] = hitl.default
|
|
2818
|
+
if hitl.options:
|
|
2819
|
+
config["hitl"][name]["options"] = hitl.options
|
|
2820
|
+
|
|
2821
|
+
# Convert prompts
|
|
2822
|
+
if registry.prompts:
|
|
2823
|
+
config["prompts"] = registry.prompts
|
|
2824
|
+
if registry.return_prompt:
|
|
2825
|
+
config["return_prompt"] = registry.return_prompt
|
|
2826
|
+
if registry.error_prompt:
|
|
2827
|
+
config["error_prompt"] = registry.error_prompt
|
|
2828
|
+
if registry.status_prompt:
|
|
2829
|
+
config["status_prompt"] = registry.status_prompt
|
|
2830
|
+
|
|
2831
|
+
# Add default provider/model
|
|
2832
|
+
if registry.default_provider:
|
|
2833
|
+
config["default_provider"] = registry.default_provider
|
|
2834
|
+
if registry.default_model:
|
|
2835
|
+
config["default_model"] = registry.default_model
|
|
2836
|
+
|
|
2837
|
+
# The procedure code will be executed separately
|
|
2838
|
+
# Store a placeholder for compatibility
|
|
2839
|
+
config["procedure"] = "-- Procedure function stored in registry"
|
|
2840
|
+
|
|
2841
|
+
return config
|
|
2842
|
+
|
|
2843
|
+
def _create_runtime_for_procedure(
|
|
2844
|
+
self, procedure_name: str, params: Dict[str, Any]
|
|
2845
|
+
) -> "TactusRuntime":
|
|
2846
|
+
"""
|
|
2847
|
+
Create a new runtime instance for a sub-procedure.
|
|
2848
|
+
|
|
2849
|
+
Args:
|
|
2850
|
+
procedure_name: Name or path of the procedure to load
|
|
2851
|
+
params: Parameters to pass to the procedure
|
|
2852
|
+
|
|
2853
|
+
Returns:
|
|
2854
|
+
New TactusRuntime instance
|
|
2855
|
+
"""
|
|
2856
|
+
# Generate unique ID for sub-procedure
|
|
2857
|
+
sub_procedure_id = f"{self.procedure_id}_{procedure_name}_{uuid.uuid4().hex[:8]}"
|
|
2858
|
+
|
|
2859
|
+
# Create new runtime with incremented depth
|
|
2860
|
+
runtime = TactusRuntime(
|
|
2861
|
+
procedure_id=sub_procedure_id,
|
|
2862
|
+
storage_backend=self.storage_backend,
|
|
2863
|
+
hitl_handler=self.hitl_handler,
|
|
2864
|
+
chat_recorder=self.chat_recorder,
|
|
2865
|
+
mcp_server=self.mcp_server,
|
|
2866
|
+
openai_api_key=self.openai_api_key,
|
|
2867
|
+
log_handler=self.log_handler,
|
|
2868
|
+
recursion_depth=self.recursion_depth + 1,
|
|
2869
|
+
)
|
|
2870
|
+
|
|
2871
|
+
logger.info(
|
|
2872
|
+
f"Created runtime for sub-procedure '{procedure_name}' "
|
|
2873
|
+
f"(depth {self.recursion_depth + 1})"
|
|
2874
|
+
)
|
|
2875
|
+
|
|
2876
|
+
return runtime
|
|
2877
|
+
|
|
2878
|
+
def _load_procedure_by_name(self, name: str) -> str:
|
|
2879
|
+
"""
|
|
2880
|
+
Load procedure source code by name.
|
|
2881
|
+
|
|
2882
|
+
Args:
|
|
2883
|
+
name: Procedure name or file path
|
|
2884
|
+
|
|
2885
|
+
Returns:
|
|
2886
|
+
Procedure source code
|
|
2887
|
+
|
|
2888
|
+
Raises:
|
|
2889
|
+
FileNotFoundError: If procedure file not found
|
|
2890
|
+
"""
|
|
2891
|
+
import os
|
|
2892
|
+
|
|
2893
|
+
# Try different locations
|
|
2894
|
+
search_paths = [
|
|
2895
|
+
name, # Exact path
|
|
2896
|
+
f"{name}.tac", # Add extension
|
|
2897
|
+
f"examples/{name}", # Examples directory
|
|
2898
|
+
f"examples/{name}.tac", # Examples with extension
|
|
2899
|
+
]
|
|
2900
|
+
|
|
2901
|
+
for path in search_paths:
|
|
2902
|
+
if os.path.exists(path):
|
|
2903
|
+
logger.debug(f"Loading procedure from: {path}")
|
|
2904
|
+
with open(path, "r") as f:
|
|
2905
|
+
return f.read()
|
|
2906
|
+
|
|
2907
|
+
raise FileNotFoundError(f"Procedure '{name}' not found. Searched: {search_paths}")
|