tactus 0.31.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tactus/__init__.py +49 -0
- tactus/adapters/__init__.py +9 -0
- tactus/adapters/broker_log.py +76 -0
- tactus/adapters/cli_hitl.py +189 -0
- tactus/adapters/cli_log.py +223 -0
- tactus/adapters/cost_collector_log.py +56 -0
- tactus/adapters/file_storage.py +367 -0
- tactus/adapters/http_callback_log.py +109 -0
- tactus/adapters/ide_log.py +71 -0
- tactus/adapters/lua_tools.py +336 -0
- tactus/adapters/mcp.py +289 -0
- tactus/adapters/mcp_manager.py +196 -0
- tactus/adapters/memory.py +53 -0
- tactus/adapters/plugins.py +419 -0
- tactus/backends/http_backend.py +58 -0
- tactus/backends/model_backend.py +35 -0
- tactus/backends/pytorch_backend.py +110 -0
- tactus/broker/__init__.py +12 -0
- tactus/broker/client.py +247 -0
- tactus/broker/protocol.py +183 -0
- tactus/broker/server.py +1123 -0
- tactus/broker/stdio.py +12 -0
- tactus/cli/__init__.py +7 -0
- tactus/cli/app.py +2245 -0
- tactus/cli/commands/__init__.py +0 -0
- tactus/core/__init__.py +32 -0
- tactus/core/config_manager.py +790 -0
- tactus/core/dependencies/__init__.py +14 -0
- tactus/core/dependencies/registry.py +180 -0
- tactus/core/dsl_stubs.py +2117 -0
- tactus/core/exceptions.py +66 -0
- tactus/core/execution_context.py +480 -0
- tactus/core/lua_sandbox.py +508 -0
- tactus/core/message_history_manager.py +236 -0
- tactus/core/mocking.py +286 -0
- tactus/core/output_validator.py +291 -0
- tactus/core/registry.py +499 -0
- tactus/core/runtime.py +2907 -0
- tactus/core/template_resolver.py +142 -0
- tactus/core/yaml_parser.py +301 -0
- tactus/docker/Dockerfile +61 -0
- tactus/docker/entrypoint.sh +69 -0
- tactus/dspy/__init__.py +39 -0
- tactus/dspy/agent.py +1144 -0
- tactus/dspy/broker_lm.py +181 -0
- tactus/dspy/config.py +212 -0
- tactus/dspy/history.py +196 -0
- tactus/dspy/module.py +405 -0
- tactus/dspy/prediction.py +318 -0
- tactus/dspy/signature.py +185 -0
- tactus/formatting/__init__.py +7 -0
- tactus/formatting/formatter.py +437 -0
- tactus/ide/__init__.py +9 -0
- tactus/ide/coding_assistant.py +343 -0
- tactus/ide/server.py +2223 -0
- tactus/primitives/__init__.py +49 -0
- tactus/primitives/control.py +168 -0
- tactus/primitives/file.py +229 -0
- tactus/primitives/handles.py +378 -0
- tactus/primitives/host.py +94 -0
- tactus/primitives/human.py +342 -0
- tactus/primitives/json.py +189 -0
- tactus/primitives/log.py +187 -0
- tactus/primitives/message_history.py +157 -0
- tactus/primitives/model.py +163 -0
- tactus/primitives/procedure.py +564 -0
- tactus/primitives/procedure_callable.py +318 -0
- tactus/primitives/retry.py +155 -0
- tactus/primitives/session.py +152 -0
- tactus/primitives/state.py +182 -0
- tactus/primitives/step.py +209 -0
- tactus/primitives/system.py +93 -0
- tactus/primitives/tool.py +375 -0
- tactus/primitives/tool_handle.py +279 -0
- tactus/primitives/toolset.py +229 -0
- tactus/protocols/__init__.py +38 -0
- tactus/protocols/chat_recorder.py +81 -0
- tactus/protocols/config.py +97 -0
- tactus/protocols/cost.py +31 -0
- tactus/protocols/hitl.py +71 -0
- tactus/protocols/log_handler.py +27 -0
- tactus/protocols/models.py +355 -0
- tactus/protocols/result.py +33 -0
- tactus/protocols/storage.py +90 -0
- tactus/providers/__init__.py +13 -0
- tactus/providers/base.py +92 -0
- tactus/providers/bedrock.py +117 -0
- tactus/providers/google.py +105 -0
- tactus/providers/openai.py +98 -0
- tactus/sandbox/__init__.py +63 -0
- tactus/sandbox/config.py +171 -0
- tactus/sandbox/container_runner.py +1099 -0
- tactus/sandbox/docker_manager.py +433 -0
- tactus/sandbox/entrypoint.py +227 -0
- tactus/sandbox/protocol.py +213 -0
- tactus/stdlib/__init__.py +10 -0
- tactus/stdlib/io/__init__.py +13 -0
- tactus/stdlib/io/csv.py +88 -0
- tactus/stdlib/io/excel.py +136 -0
- tactus/stdlib/io/file.py +90 -0
- tactus/stdlib/io/fs.py +154 -0
- tactus/stdlib/io/hdf5.py +121 -0
- tactus/stdlib/io/json.py +109 -0
- tactus/stdlib/io/parquet.py +83 -0
- tactus/stdlib/io/tsv.py +88 -0
- tactus/stdlib/loader.py +274 -0
- tactus/stdlib/tac/tactus/tools/done.tac +33 -0
- tactus/stdlib/tac/tactus/tools/log.tac +50 -0
- tactus/testing/README.md +273 -0
- tactus/testing/__init__.py +61 -0
- tactus/testing/behave_integration.py +380 -0
- tactus/testing/context.py +486 -0
- tactus/testing/eval_models.py +114 -0
- tactus/testing/evaluation_runner.py +222 -0
- tactus/testing/evaluators.py +634 -0
- tactus/testing/events.py +94 -0
- tactus/testing/gherkin_parser.py +134 -0
- tactus/testing/mock_agent.py +315 -0
- tactus/testing/mock_dependencies.py +234 -0
- tactus/testing/mock_hitl.py +171 -0
- tactus/testing/mock_registry.py +168 -0
- tactus/testing/mock_tools.py +133 -0
- tactus/testing/models.py +115 -0
- tactus/testing/pydantic_eval_runner.py +508 -0
- tactus/testing/steps/__init__.py +13 -0
- tactus/testing/steps/builtin.py +902 -0
- tactus/testing/steps/custom.py +69 -0
- tactus/testing/steps/registry.py +68 -0
- tactus/testing/test_runner.py +489 -0
- tactus/tracing/__init__.py +5 -0
- tactus/tracing/trace_manager.py +417 -0
- tactus/utils/__init__.py +1 -0
- tactus/utils/cost_calculator.py +72 -0
- tactus/utils/model_pricing.py +132 -0
- tactus/utils/safe_file_library.py +502 -0
- tactus/utils/safe_libraries.py +234 -0
- tactus/validation/LuaLexerBase.py +66 -0
- tactus/validation/LuaParserBase.py +23 -0
- tactus/validation/README.md +224 -0
- tactus/validation/__init__.py +7 -0
- tactus/validation/error_listener.py +21 -0
- tactus/validation/generated/LuaLexer.interp +231 -0
- tactus/validation/generated/LuaLexer.py +5548 -0
- tactus/validation/generated/LuaLexer.tokens +124 -0
- tactus/validation/generated/LuaLexerBase.py +66 -0
- tactus/validation/generated/LuaParser.interp +173 -0
- tactus/validation/generated/LuaParser.py +6439 -0
- tactus/validation/generated/LuaParser.tokens +124 -0
- tactus/validation/generated/LuaParserBase.py +23 -0
- tactus/validation/generated/LuaParserVisitor.py +118 -0
- tactus/validation/generated/__init__.py +7 -0
- tactus/validation/grammar/LuaLexer.g4 +123 -0
- tactus/validation/grammar/LuaParser.g4 +178 -0
- tactus/validation/semantic_visitor.py +817 -0
- tactus/validation/validator.py +157 -0
- tactus-0.31.2.dist-info/METADATA +1809 -0
- tactus-0.31.2.dist-info/RECORD +160 -0
- tactus-0.31.2.dist-info/WHEEL +4 -0
- tactus-0.31.2.dist-info/entry_points.txt +2 -0
- tactus-0.31.2.dist-info/licenses/LICENSE +21 -0
tactus/sandbox/container_runner.py
@@ -0,0 +1,1099 @@
"""
Container execution manager for sandboxed procedure execution.

Handles spawning Docker containers, passing execution requests,
and collecting results via stdio communication.
"""

import asyncio
import json
import logging
import os
import re
import shutil
import ssl
import sys
import tempfile
import time
import uuid
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional

from .config import SandboxConfig
from .docker_manager import DockerManager, calculate_source_hash
from .protocol import (
    ExecutionRequest,
    ExecutionResult,
    RESULT_END_MARKER,
    RESULT_START_MARKER,
    extract_result_from_stdout,
)

logger = logging.getLogger(__name__)

_CONTAINER_LOG_RE = re.compile(
    r"^(?P<asctime>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}) "
    r"\[(?P<level>[A-Z]+)\] "
    r"(?P<logger>[^:]+): "
    r"(?P<message>.*)$"
)

_LEVEL_MAP = {
    "DEBUG": logging.DEBUG,
    "INFO": logging.INFO,
    "WARNING": logging.WARNING,
    "WARN": logging.WARNING,
    "ERROR": logging.ERROR,
    "CRITICAL": logging.CRITICAL,
}

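For reference, a container log line in the shape this regular expression expects looks like the sample below. The timestamp, logger name, and message are invented for illustration; only the layout (timestamp, bracketed level, logger name, message) is defined by the pattern above.

# Illustrative only: a stderr line in the format _CONTAINER_LOG_RE matches.
sample = "2025-01-15 12:34:56,789 [WARNING] tactus.core.runtime: step retried"
m = _CONTAINER_LOG_RE.match(sample)
assert m is not None
assert _LEVEL_MAP[m.group("level")] == logging.WARNING
assert m.group("logger") == "tactus.core.runtime"
assert m.group("message") == "step retried"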
class SandboxError(Exception):
    """Raised when sandbox execution fails."""

    pass


class SandboxUnavailableError(SandboxError):
    """Raised when sandbox is required but Docker is unavailable."""

    def __init__(self, reason: str):
        self.reason = reason
        super().__init__(
            f"Docker sandbox unavailable: {reason}\n\n"
            "Cannot run procedure without container isolation.\n"
            "Either:\n"
            " - Start Docker Desktop / Docker daemon\n"
            " - Use --no-sandbox flag to explicitly run without isolation (security risk)\n"
            " - Set sandbox.enabled: false in config to permanently disable (security risk)"
        )

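A quick note on what the exception carries: the original reason string is kept on the instance, while the formatted message spells out the remediation options. A minimal sketch, with an invented reason string:

# Illustrative only: the reason is preserved alongside the formatted message.
err = SandboxUnavailableError("Docker daemon is not running")
assert err.reason == "Docker daemon is not running"
print(err)  # multi-line message listing the remediation options above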
class ContainerRunner:
    """
    Runs procedures inside Docker containers.

    Handles:
    - Building Docker command with appropriate mounts and env vars
    - Spawning container process
    - Communicating via stdio (stdin for request, stdout for result)
    - Streaming stderr for logs
    - Timeout handling
    """

    _BLOCKED_CONTAINER_ENV_KEYS = {
        # Keep sandbox containers secretless by default.
        "OPENAI_API_KEY",
        "GOOGLE_API_KEY",
        "AWS_ACCESS_KEY_ID",
        "AWS_SECRET_ACCESS_KEY",
        "AWS_SESSION_TOKEN",
        "AZURE_OPENAI_API_KEY",
        "ANTHROPIC_API_KEY",
    }

    def __init__(self, config: SandboxConfig):
        """
        Initialize container runner.

        Args:
            config: Sandbox configuration.
        """
        self.config = config

        # Parse image name and tag from config.image (e.g., "tactus-sandbox:local")
        image_parts = config.image.split(":")
        image_name = image_parts[0] if len(image_parts) > 0 else "tactus-sandbox"
        image_tag = image_parts[1] if len(image_parts) > 1 else "local"

        self.docker_manager = DockerManager(
            image_name=image_name,
            image_tag=image_tag,
        )

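A minimal construction sketch. How SandboxConfig is actually built, and which keyword arguments it accepts, is defined in tactus/sandbox/config.py; the image keyword used here is an assumption made for illustration.

# Illustrative only: the image reference is split into name and tag for DockerManager.
config = SandboxConfig(image="tactus-sandbox:local")  # keyword assumed, see config.py
runner = ContainerRunner(config)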
    def _ensure_sandbox_up_to_date(self, skip_for_ide: bool = False) -> None:
        """
        Automatically rebuild sandbox if code has changed.

        This enables fast, automatic rebuilds during development without
        requiring manual `tactus sandbox rebuild` commands. Uses source
        hash for change detection with Docker layer caching for speed.

        Can be disabled by setting TACTUS_AUTO_REBUILD_SANDBOX=false or
        when running from IDE (to avoid blocking UI).

        Args:
            skip_for_ide: If True, skip rebuild (used when called from IDE)

        Raises:
            RuntimeError: If rebuild is needed but fails.
        """
        # Skip auto-rebuild in IDE to avoid blocking UI
        if skip_for_ide:
            logger.debug("Auto-rebuild skipped for IDE execution")
            return

        # Check if auto-rebuild is disabled
        auto_rebuild = os.environ.get("TACTUS_AUTO_REBUILD_SANDBOX", "true").lower()
        if auto_rebuild not in ("true", "1", "yes"):
            logger.debug("Auto-rebuild disabled via TACTUS_AUTO_REBUILD_SANDBOX")
            return

        # Get current version and source hash
        from tactus import __version__

        # Calculate tactus root from this file's location
        # container_runner.py is in tactus/sandbox/, so root is 2 levels up
        tactus_root = Path(__file__).parent.parent.parent

        current_hash = calculate_source_hash(tactus_root)

        # Check if rebuild is needed
        if self.docker_manager.needs_rebuild(__version__, current_hash):
            logger.info("Code changes detected, rebuilding sandbox...")

            # Get paths
            dockerfile_path = tactus_root / "tactus" / "docker" / "Dockerfile"

            # Build with source hash
            success, msg = self.docker_manager.build_image(
                dockerfile_path=dockerfile_path,
                context_path=tactus_root,
                version=__version__,
                source_hash=current_hash,
                verbose=False,
            )

            if not success:
                raise RuntimeError(f"Failed to rebuild sandbox: {msg}")

            logger.info("Sandbox rebuilt successfully")
        else:
            logger.debug("Sandbox is up to date")

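The auto-rebuild check can be switched off from the environment; any value other than "true", "1", or "yes" disables it.

# Illustrative only: opt out of the automatic rebuild check for this process.
os.environ["TACTUS_AUTO_REBUILD_SANDBOX"] = "false"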
    def _find_tactus_source_dir(self) -> Optional[Path]:
        """
        Find the Tactus source directory for development mode.

        Searches in order:
        1. TACTUS_DEV_PATH environment variable
        2. Directory containing the tactus module (via __file__)
        3. Current working directory if it contains tactus/ subdirectory

        Returns:
            Path to Tactus repository root, or None if not found.
        """
        # Option 1: Explicit environment variable
        env_path = os.environ.get("TACTUS_DEV_PATH")
        if env_path:
            path = Path(env_path).resolve()
            if path.exists() and (path / "tactus").is_dir():
                return path

        # Option 2: Find via the tactus module location
        try:
            import tactus

            tactus_module_path = Path(tactus.__file__).resolve()
            # Go up from tactus/__init__.py to the repo root
            repo_root = tactus_module_path.parent.parent
            if (repo_root / "tactus").is_dir() and (repo_root / "pyproject.toml").exists():
                return repo_root
        except Exception:
            pass

        # Option 3: Check current working directory
        cwd = Path.cwd()
        if (cwd / "tactus").is_dir() and (cwd / "pyproject.toml").exists():
            return cwd

        return None

    def _build_docker_command(
        self,
        working_dir: Path,
        mcp_servers_path: Optional[Path] = None,
        extra_env: Optional[Dict[str, str]] = None,
        execution_id: Optional[str] = None,
        callback_url: Optional[str] = None,
        volume_base_dir: Optional[Path] = None,
    ) -> List[str]:
        """
        Build the docker run command.

        Args:
            working_dir: Host directory to mount as workspace
            mcp_servers_path: Optional path to MCP servers directory
            extra_env: Additional environment variables
            execution_id: Unique execution ID for container naming

        Returns:
            List of command arguments for subprocess.
        """
        # Generate container name: tactus-sandbox-{execution_id}
        container_name = (
            f"tactus-sandbox-{execution_id}"
            if execution_id
            else f"tactus-sandbox-{uuid.uuid4().hex[:8]}"
        )

        cmd = [
            "docker",
            "run",
            "--rm",  # Remove container after exit
            "-i",  # Interactive (keep stdin open)
            "--name",
            container_name,
        ]

        cmd.extend(["--network", self.config.network])

        # Resource limits
        if self.config.limits.memory:
            cmd.extend(["--memory", self.config.limits.memory])
        if self.config.limits.cpus:
            cmd.extend(["--cpus", self.config.limits.cpus])

        # Mount working directory
        cmd.extend(["-v", f"{working_dir}:/workspace:rw"])

        # Mount MCP servers if available
        if mcp_servers_path and mcp_servers_path.exists():
            cmd.extend(["-v", f"{mcp_servers_path}:/mcp-servers:ro"])

        # Development mode: mount live Tactus source code
        if self.config.dev_mode:
            tactus_src_dir = self._find_tactus_source_dir()
            if tactus_src_dir:
                logger.info(f"[DEV MODE] Mounting live Tactus source from: {tactus_src_dir}")
                cmd.extend(["-v", f"{tactus_src_dir}/tactus:/app/tactus:ro"])
            else:
                logger.warning(
                    "[DEV MODE] Could not locate Tactus source directory, using baked-in version"
                )

        # Additional user-configured volumes
        for volume in self.config.volumes:
            cmd.extend(["-v", self._normalize_volume_spec(volume, base_dir=volume_base_dir)])

        # User-configured additional env vars
        for key, value in self.config.env.items():
            if key in self._BLOCKED_CONTAINER_ENV_KEYS:
                logger.warning(f"[SANDBOX] Refusing to pass secret env var into container: {key}")
                continue
            cmd.extend(["--env", f"{key}={value}"])

        # Optional per-run callback URL for HTTP event streaming (IDE).
        if callback_url:
            cmd.extend(["--env", f"TACTUS_CALLBACK_URL={callback_url}"])

        # Extra env vars for this run
        if extra_env:
            for key, value in extra_env.items():
                cmd.extend(["--env", f"{key}={value}"])

        # Working directory inside container
        cmd.extend(["-w", "/workspace"])

        # Image name
        cmd.append(self.config.image)

        return cmd

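For orientation, the argv assembled above has roughly the following shape for a config whose network is "none", with memory and CPU limits set, and the stdio broker transport in use. The limits, the temp path, and the name suffix are example values, and the broker socket value is elided since it is defined elsewhere.

# Illustrative only: approximate output of _build_docker_command.
#
#   docker run --rm -i --name tactus-sandbox-1a2b3c4d \
#       --network none \
#       --memory 1g --cpus 1.0 \
#       -v /tmp/tactus-sandbox-xyz:/workspace:rw \
#       --env TACTUS_BROKER_SOCKET=<stdio transport value> \
#       -w /workspace \
#       tactus-sandbox:local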
    def _normalize_volume_spec(self, volume: str, base_dir: Optional[Path]) -> str:
        """
        Normalize a docker volume spec.

        Docker only accepts absolute host paths for bind mounts. For convenience,
        allow sidecar configs to use relative paths and normalize them here.

        Expected formats:
        - /abs/host:/container[:mode]
        - ./rel/host:/container[:mode]
        - ../rel/host:/container[:mode]
        - volume_name:/container[:mode] (left unchanged)
        """
        # Basic split: host:container[:mode]
        parts = volume.split(":")
        if len(parts) < 2:
            return volume

        host = parts[0]
        container = parts[1]
        mode = parts[2] if len(parts) > 2 else None

        host_is_path = host.startswith(("/", "./", "../", "~"))
        if not host_is_path:
            # Named volume (or other special form) - leave unchanged
            return volume

        host_path = Path(host).expanduser()
        if not host_path.is_absolute():
            host_path = (base_dir or Path.cwd()) / host_path
        host_path = host_path.resolve()

        if mode:
            return f"{host_path}:{container}:{mode}"
        return f"{host_path}:{container}"

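Two concrete cases following the rules above; the host directory is an example path. A relative spec is resolved against base_dir, while a named volume passes through untouched.

# Illustrative only: relative bind mounts are anchored at base_dir, named volumes are kept.
runner._normalize_volume_spec("./data:/data:ro", base_dir=Path("/home/user/proj"))
# -> "/home/user/proj/data:/data:ro"
runner._normalize_volume_spec("cache_vol:/cache", base_dir=None)
# -> "cache_vol:/cache"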
    async def run(
        self,
        source: str,
        params: Optional[Dict[str, Any]] = None,
        source_file_path: Optional[str] = None,
        working_dir: Optional[Path] = None,
        format: str = "lua",
        event_handler: Optional[Callable[[Dict[str, Any]], None]] = None,
        callback_url: Optional[str] = None,
    ) -> ExecutionResult:
        """
        Execute a procedure in a sandboxed container.

        Args:
            source: Procedure source code (.tac content)
            params: Input parameters for the procedure
            source_file_path: Original source file path (for error messages)
            working_dir: Working directory to use (default: temp directory)
            format: Source format ("lua" for .tac files, "yaml" for legacy)
            event_handler: Optional host callback for streaming events from the container

        Returns:
            ExecutionResult with status, result/error, and metadata.
        """
        # Ensure sandbox is up to date (auto-rebuild if code changed)
        # Skip for IDE to avoid blocking UI - IDE has its own rebuild mechanism
        skip_rebuild_for_ide = (event_handler is not None) or (callback_url is not None)
        self._ensure_sandbox_up_to_date(skip_for_ide=skip_rebuild_for_ide)

        execution_id = str(uuid.uuid4())[:8]
        start_time = time.time()
        broker_server = None

        # Create temporary workspace if not provided
        temp_dir = None
        if working_dir is None:
            temp_dir = tempfile.mkdtemp(prefix="tactus-sandbox-")
            working_dir = Path(temp_dir)

            # If we have a source file, copy its directory contents
            if source_file_path:
                src_dir = Path(source_file_path).parent
                if src_dir.exists():
                    for item in src_dir.iterdir():
                        if item.is_file():
                            shutil.copy2(item, working_dir / item.name)
                        elif item.is_dir() and not item.name.startswith("."):
                            shutil.copytree(item, working_dir / item.name)

        try:
            # Get MCP servers path
            mcp_path = self.config.get_mcp_servers_path()

            # Resolve relative bind-mount paths in sandbox.volumes relative to the procedure file
            # when available (makes sidecar configs portable).
            volume_base_dir = None
            if source_file_path:
                try:
                    volume_base_dir = Path(source_file_path).resolve().parent
                except Exception:
                    volume_base_dir = None

            # Configure broker transport for this run.
            broker_transport = (self.config.broker_transport or "stdio").lower()
            broker_env: dict[str, str]

            if broker_transport == "stdio":
                from tactus.broker.stdio import STDIO_TRANSPORT_VALUE

                broker_env = {"TACTUS_BROKER_SOCKET": STDIO_TRANSPORT_VALUE}
            elif broker_transport in ("tcp", "tls"):
                if self.config.network == "none":
                    raise SandboxError(
                        "sandbox.broker_transport requires container networking. "
                        "Set sandbox.network to 'bridge' (or another non-'none' mode)."
                    )

                from tactus.broker.server import OpenAIChatBackend, TcpBrokerServer

                ssl_context = None
                if broker_transport == "tls":
                    if not self.config.broker_tls_cert_file or not self.config.broker_tls_key_file:
                        raise SandboxError(
                            "sandbox.broker_transport='tls' requires "
                            "sandbox.broker_tls_cert_file and sandbox.broker_tls_key_file"
                        )
                    ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
                    ssl_context.load_cert_chain(
                        certfile=self.config.broker_tls_cert_file,
                        keyfile=self.config.broker_tls_key_file,
                    )

                broker_server = TcpBrokerServer(
                    host=self.config.broker_bind_host,
                    port=self.config.broker_port,
                    ssl_context=ssl_context,
                    openai_backend=OpenAIChatBackend(),
                    event_handler=event_handler,
                )
                await broker_server.start()
                if broker_server.bound_port is None:
                    raise SandboxError("Failed to determine TCP broker listen port")

                scheme = "tls" if broker_transport == "tls" else "tcp"
                broker_env = {
                    "TACTUS_BROKER_SOCKET": f"{scheme}://{self.config.broker_host}:{broker_server.bound_port}"
                }
            else:
                raise SandboxError(
                    f"Unsupported sandbox.broker_transport: {self.config.broker_transport!r}"
                )
            docker_cmd = self._build_docker_command(
                working_dir=working_dir,
                mcp_servers_path=mcp_path if mcp_path.exists() else None,
                extra_env=broker_env,
                execution_id=execution_id,
                callback_url=callback_url,
                volume_base_dir=volume_base_dir,
            )

            logger.debug(f"Docker command: {' '.join(docker_cmd)}")

            # Create execution request
            request = ExecutionRequest(
                source=source,
                working_dir="/workspace",
                params=params or {},
                execution_id=execution_id,
                source_file_path=source_file_path,
                format=format,
            )

            # Run container
            # If TCP broker is active, run it concurrently with the container
            if broker_transport in ("tcp", "tls") and broker_server is not None:

                async def run_broker_server():
                    """Serve broker connections until explicitly closed."""
                    try:
                        await broker_server.serve()
                    except Exception:
                        # Broker server was closed (expected on cleanup)
                        pass

                # Run broker and container concurrently
                broker_task = asyncio.create_task(run_broker_server())
                try:
                    result = await self._run_container(
                        docker_cmd,
                        request,
                        timeout=self.config.timeout,
                        event_handler=event_handler,
                    )
                finally:
                    # Cancel broker task when container finishes
                    broker_task.cancel()
                    try:
                        await broker_task
                    except asyncio.CancelledError:
                        pass
            else:
                result = await self._run_container(
                    docker_cmd,
                    request,
                    timeout=self.config.timeout,
                    event_handler=event_handler,
                )

            result.duration_seconds = time.time() - start_time
            return result

        except asyncio.TimeoutError:
            return ExecutionResult.timeout(
                duration_seconds=time.time() - start_time,
            )
        except Exception as e:
            logger.exception(f"Sandbox execution failed: {e}")
            return ExecutionResult.failure(
                error=str(e),
                error_type=type(e).__name__,
                duration_seconds=time.time() - start_time,
            )
        finally:
            if broker_server is not None:
                try:
                    await broker_server.aclose()
                except Exception:
                    logger.debug("[BROKER] Failed to close broker server", exc_info=True)

            # Cleanup temp directory
            if temp_dir:
                try:
                    shutil.rmtree(temp_dir)
                except Exception as e:
                    logger.warning(f"Failed to cleanup temp dir: {e}")

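A minimal async call sketch. The .tac source and parameter values are invented; the only field read back here, duration_seconds, is the one run() itself sets on the returned ExecutionResult.

# Illustrative only: driving a sandboxed run from async code.
async def demo(runner: ContainerRunner) -> None:
    result = await runner.run(
        source='-- example.tac (contents invented for illustration)\nreturn 42',
        params={"x": 41},
        format="lua",
    )
    print(result.duration_seconds)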
    async def _run_container(
        self,
        docker_cmd: List[str],
        request: ExecutionRequest,
        timeout: int,
        event_handler: Optional[Callable[[Dict[str, Any]], None]] = None,
    ) -> ExecutionResult:
        """
        Run the container and communicate via stdio.

        Args:
            docker_cmd: Docker command to execute
            request: Execution request to send
            timeout: Timeout in seconds

        Returns:
            ExecutionResult from container.
        """
        broker_transport = (self.config.broker_transport or "stdio").lower()

        stdio_request_prefix: str | None = None
        if broker_transport == "stdio":
            from tactus.broker.server import OpenAIChatBackend
            from tactus.broker.server import HostToolRegistry
            from tactus.broker.stdio import STDIO_REQUEST_PREFIX

            stdio_request_prefix = STDIO_REQUEST_PREFIX
            openai_backend = OpenAIChatBackend()
            tool_registry = HostToolRegistry.default()

            async def send_event(writer: asyncio.StreamWriter, event: dict[str, Any]) -> None:
                if writer.is_closing():
                    return
                try:
                    writer.write(
                        (
                            json.dumps(event, ensure_ascii=False, separators=(",", ":")) + "\n"
                        ).encode("utf-8")
                    )
                    await writer.drain()
                except (BrokenPipeError, ConnectionResetError):
                    return

            async def handle_broker_request(
                writer: asyncio.StreamWriter, req: dict[str, Any]
            ) -> None:
                req_id = req.get("id") or ""
                method = req.get("method")
                params = req.get("params") or {}

                if not isinstance(req_id, str) or not isinstance(method, str):
                    await send_event(
                        writer,
                        {
                            "id": str(req_id) if req_id else "",
                            "event": "error",
                            "error": {"type": "BadRequest", "message": "Missing id/method"},
                        },
                    )
                    return

                if method == "events.emit":
                    event = params.get("event") if isinstance(params, dict) else None
                    if isinstance(event, dict) and event_handler is not None:
                        try:
                            event_handler(event)
                        except Exception:
                            logger.debug("[BROKER] event_handler raised", exc_info=True)
                    await send_event(writer, {"id": req_id, "event": "done", "data": {"ok": True}})
                    return

                if method == "tool.call":
                    name = params.get("name") if isinstance(params, dict) else None
                    args = params.get("args") if isinstance(params, dict) else None
                    if args is None:
                        args = {}

                    if not isinstance(name, str) or not name:
                        await send_event(
                            writer,
                            {
                                "id": req_id,
                                "event": "error",
                                "error": {
                                    "type": "BadRequest",
                                    "message": "params.name must be a string",
                                },
                            },
                        )
                        return
                    if not isinstance(args, dict):
                        await send_event(
                            writer,
                            {
                                "id": req_id,
                                "event": "error",
                                "error": {
                                    "type": "BadRequest",
                                    "message": "params.args must be an object",
                                },
                            },
                        )
                        return

                    try:
                        result = tool_registry.call(name, args)
                    except KeyError:
                        await send_event(
                            writer,
                            {
                                "id": req_id,
                                "event": "error",
                                "error": {
                                    "type": "ToolNotAllowed",
                                    "message": f"Tool not allowlisted: {name}",
                                },
                            },
                        )
                        return
                    except Exception as e:
                        logger.debug("[BROKER] tool.call error", exc_info=True)
                        await send_event(
                            writer,
                            {
                                "id": req_id,
                                "event": "error",
                                "error": {"type": type(e).__name__, "message": str(e)},
                            },
                        )
                        return

                    await send_event(
                        writer, {"id": req_id, "event": "done", "data": {"result": result}}
                    )
                    return

                if method != "llm.chat":
                    await send_event(
                        writer,
                        {
                            "id": req_id,
                            "event": "error",
                            "error": {
                                "type": "MethodNotFound",
                                "message": f"Unknown method: {method}",
                            },
                        },
                    )
                    return

                provider = (
                    params.get("provider") if isinstance(params, dict) else None
                ) or "openai"
                if provider != "openai":
                    await send_event(
                        writer,
                        {
                            "id": req_id,
                            "event": "error",
                            "error": {
                                "type": "UnsupportedProvider",
                                "message": f"Unsupported provider: {provider}",
                            },
                        },
                    )
                    return

                model = params.get("model") if isinstance(params, dict) else None
                messages = params.get("messages") if isinstance(params, dict) else None
                stream = bool(params.get("stream", False)) if isinstance(params, dict) else False
                temperature = params.get("temperature") if isinstance(params, dict) else None
                max_tokens = params.get("max_tokens") if isinstance(params, dict) else None

                if not isinstance(model, str) or not model:
                    await send_event(
                        writer,
                        {
                            "id": req_id,
                            "event": "error",
                            "error": {
                                "type": "BadRequest",
                                "message": "params.model must be a string",
                            },
                        },
                    )
                    return
                if not isinstance(messages, list):
                    await send_event(
                        writer,
                        {
                            "id": req_id,
                            "event": "error",
                            "error": {
                                "type": "BadRequest",
                                "message": "params.messages must be a list",
                            },
                        },
                    )
                    return

                try:
                    if stream:
                        stream_iter = await openai_backend.chat(
                            model=model,
                            messages=messages,
                            temperature=temperature,
                            max_tokens=max_tokens,
                            stream=True,
                        )
                        full_text = ""
                        async for chunk in stream_iter:
                            try:
                                delta = chunk.choices[0].delta
                                text = getattr(delta, "content", None)
                            except Exception:
                                text = None

                            if not text:
                                continue

                            full_text += text
                            await send_event(
                                writer, {"id": req_id, "event": "delta", "data": {"text": text}}
                            )

                        await send_event(
                            writer,
                            {
                                "id": req_id,
                                "event": "done",
                                "data": {
                                    "text": full_text,
                                    "usage": {
                                        "prompt_tokens": 0,
                                        "completion_tokens": 0,
                                        "total_tokens": 0,
                                    },
                                },
                            },
                        )
                        return

                    resp = await openai_backend.chat(
                        model=model,
                        messages=messages,
                        temperature=temperature,
                        max_tokens=max_tokens,
                        stream=False,
                    )
                    text = ""
                    try:
                        text = resp.choices[0].message.content or ""
                    except Exception:
                        text = ""

                    await send_event(
                        writer,
                        {
                            "id": req_id,
                            "event": "done",
                            "data": {
                                "text": text,
                                "usage": {
                                    "prompt_tokens": 0,
                                    "completion_tokens": 0,
                                    "total_tokens": 0,
                                },
                            },
                        },
                    )
                except Exception as e:
                    logger.debug("[BROKER] llm.chat error", exc_info=True)
                    await send_event(
                        writer,
                        {
                            "id": req_id,
                            "event": "error",
                            "error": {"type": type(e).__name__, "message": str(e)},
                        },
                    )

        else:

            async def handle_broker_request(
                writer: asyncio.StreamWriter, req: dict[str, Any]
            ) -> None:
                raise RuntimeError("Broker requests are not expected in non-stdio transports")

        # Start container process
        process = await asyncio.create_subprocess_exec(
            *docker_cmd,
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        logger.debug(f"[SANDBOX] Spawned container process pid={process.pid}")

        stdout_task: asyncio.Task[None] | None = None
        stderr_task: asyncio.Task[None] | None = None
        wait_task: asyncio.Task[int] | None = None

        try:
            assert process.stdin is not None
            assert process.stdout is not None
            assert process.stderr is not None

            # Send request as a single JSON line, then keep stdin open for broker responses.
            request_line = (request.to_json() + "\n").encode("utf-8")
            process.stdin.write(request_line)
            await process.stdin.drain()
            logger.debug(f"[SANDBOX] Sent ExecutionRequest bytes={len(request_line)}")

            stdout_bytes = bytearray()
            result_future: asyncio.Future[ExecutionResult] = (
                asyncio.get_running_loop().create_future()
            )

            async def stdout_loop() -> None:
                in_result = False
                result_lines: list[str] = []

                while True:
                    raw = await process.stdout.readline()
                    if not raw:
                        return

                    stdout_bytes.extend(raw)
                    line = raw.decode("utf-8", errors="replace").rstrip("\n")

                    if not in_result:
                        if line == RESULT_START_MARKER:
                            in_result = True
                            result_lines = []
                        continue

                    if line == RESULT_END_MARKER:
                        json_str = "\n".join(result_lines).strip()
                        try:
                            parsed = ExecutionResult.from_json(json_str)
                        except Exception:
                            in_result = False
                            continue

                        if not result_future.done():
                            result_future.set_result(parsed)

                        in_result = False
                        continue

                    result_lines.append(line)

            async def stderr_loop() -> None:
                while True:
                    raw = await process.stderr.readline()
                    if not raw:
                        return
                    line = raw.decode("utf-8", errors="replace").rstrip("\n")
                    if stdio_request_prefix is not None and line.startswith(stdio_request_prefix):
                        payload = line[len(stdio_request_prefix) :]
                        try:
                            req_obj = json.loads(payload)
                        except json.JSONDecodeError:
                            logger.debug("[BROKER] Failed to decode stdio broker request JSON")
                            continue
                        if isinstance(req_obj, dict):
                            await handle_broker_request(process.stdin, req_obj)
                        continue

                    if line:
                        logger.info(f"[container] {line}")

            stdout_task = asyncio.create_task(stdout_loop())
            stderr_task = asyncio.create_task(stderr_loop())
            wait_task = asyncio.create_task(process.wait())

            loop = asyncio.get_running_loop()
            deadline = loop.time() + timeout
            stdin_closed = False

            while True:
                remaining = deadline - loop.time()
                if remaining <= 0:
                    raise asyncio.TimeoutError

                done, _pending = await asyncio.wait(
                    {wait_task, result_future},
                    timeout=remaining,
                    return_when=asyncio.FIRST_COMPLETED,
                )

                # Once we have a structured result, signal EOF to the container process.
                # Some runtimes (notably Docker Desktop attach mode) can keep the outer
                # process alive until stdin is closed.
                if result_future in done and not stdin_closed:
                    try:
                        process.stdin.close()
                        stdin_closed = True
                    except Exception:
                        stdin_closed = True

                if wait_task in done:
                    break

            if not stdin_closed:
                try:
                    process.stdin.close()
                except Exception:
                    pass

            try:
                await asyncio.wait_for(stdout_task, timeout=5)
            except asyncio.TimeoutError:
                stdout_task.cancel()
                try:
                    await stdout_task
                except Exception:
                    pass

            try:
                await asyncio.wait_for(stderr_task, timeout=5)
            except asyncio.TimeoutError:
                stderr_task.cancel()
                try:
                    await stderr_task
                except Exception:
                    pass

            stdout = stdout_bytes.decode("utf-8", errors="replace")

            # Extract result from stdout
            if result_future.done():
                return result_future.result()

            result = extract_result_from_stdout(stdout)
            if result is not None:
                return result

            # No structured result found - check exit code
            if process.returncode == 0:
                # Success but no structured output - treat stdout as result
                return ExecutionResult.success(
                    result=stdout.strip() if stdout.strip() else None,
                )
            elif process.returncode == 137:
                # Exit code 137 = killed by OOM (128 + SIGKILL=9)
                return ExecutionResult.failure(
                    error=f"Container killed: out of memory (limit: {self.config.limits.memory})",
                    error_type="OutOfMemoryError",
                    exit_code=137,
                )
            elif process.returncode == 124:
                # Exit code 124 = timeout
                return ExecutionResult.failure(
                    error=f"Container killed: execution timeout ({self.config.timeout}s)",
                    error_type="TimeoutError",
                    exit_code=124,
                )
            else:
                # Failed without structured output
                return ExecutionResult.failure(
                    error=stdout.strip() or f"Container exited with code {process.returncode}",
                    exit_code=process.returncode or 1,
                )

        except asyncio.TimeoutError:
            # Kill the container
            try:
                try:
                    process.stdin.close()
                except Exception:
                    pass
                process.kill()
                await process.wait()
            except Exception:
                pass
            for task in (stdout_task, stderr_task, wait_task):
                if task is None:
                    continue
                task.cancel()
                try:
                    await task
                except asyncio.CancelledError:
                    pass
                except Exception:
                    pass
            raise

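To summarize the stdio line protocol handled above: the container emits one JSON object per line on stderr, prefixed with STDIO_REQUEST_PREFIX (the concrete prefix string is defined in tactus.broker.stdio and not reproduced here), and the host answers with JSON lines written to the container's stdin. The request ids, tool name, and payload values below are invented for illustration.

# Illustrative only: request/response framing for the stdio broker.
#
# Container -> host (one stderr line, after the request prefix):
#   {"id": "req-1", "method": "tool.call", "params": {"name": "example_tool", "args": {}}}
#
# Host -> container (one JSON line written to the container's stdin):
#   {"id": "req-1", "event": "done", "data": {"result": "..."}}
# or, on failure:
#   {"id": "req-1", "event": "error", "error": {"type": "ToolNotAllowed", "message": "..."}}
#
# llm.chat requests with "stream": true additionally receive incremental events:
#   {"id": "req-2", "event": "delta", "data": {"text": "..."}}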
    def _handle_container_stderr(self, stderr: str) -> None:
        """
        Forward container stderr into the host log UX.

        - raw: pass through container stderr as-is (CloudWatch-friendly)
        - rich/terminal: parse container log lines and re-emit with host formatting
        """
        if not stderr:
            return

        fmt = str(self.config.env.get("TACTUS_LOG_FORMAT", "rich")).strip().lower()

        # Raw mode: avoid double timestamps by forwarding container stderr directly.
        if fmt == "raw":
            sys.stderr.write(stderr)
            sys.stderr.flush()
            return

        # Rich/terminal: parse our container log format and re-emit.
        current: tuple[str, int, list[str]] | None = None  # (logger_name, levelno, lines)

        def flush_current() -> None:
            nonlocal current
            if current is None:
                return
            logger_name, levelno, lines = current
            message = "\n".join(lines).rstrip("\n")
            logging.getLogger(logger_name).log(levelno, message)
            current = None

        for line in stderr.splitlines():
            m = _CONTAINER_LOG_RE.match(line)
            if m:
                flush_current()
                levelno = _LEVEL_MAP.get(m.group("level"), logging.INFO)
                current = (m.group("logger"), levelno, [m.group("message")])
                continue

            # Continuation heuristic: keep multi-line LogEvent context attached.
            if current is not None and (
                line == ""
                or line.startswith((" ", "\t"))
                or line.startswith("Context:")
                or line.startswith("{")
                or line.startswith("[")
            ):
                current[2].append(line)
                continue

            # Otherwise treat as standalone stderr (warnings/tracebacks/etc).
            flush_current()
            logging.getLogger("container.stderr").warning(line)

        flush_current()

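As a worked example of the parsing above: a matched header line followed by an indented continuation is re-emitted as a single host-side record under the container's logger name. The log content is invented.

# Illustrative only: one header line plus an indented continuation become one record.
runner._handle_container_stderr(
    "2025-01-15 12:00:00,000 [ERROR] tactus.core.runtime: step failed\n"
    "    Context: {'step': 'fetch'}\n"
)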
    def run_sync(
        self,
        source: str,
        params: Optional[Dict[str, Any]] = None,
        source_file_path: Optional[str] = None,
        working_dir: Optional[Path] = None,
        format: str = "lua",
        event_handler: Optional[Callable[[Dict[str, Any]], None]] = None,
    ) -> ExecutionResult:
        """
        Synchronous wrapper for run().

        For use in non-async contexts.
        """
        return asyncio.run(
            self.run(
                source=source,
                params=params,
                source_file_path=source_file_path,
                format=format,
                working_dir=working_dir,
                event_handler=event_handler,
            )
        )