tactus-0.31.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tactus/__init__.py +49 -0
- tactus/adapters/__init__.py +9 -0
- tactus/adapters/broker_log.py +76 -0
- tactus/adapters/cli_hitl.py +189 -0
- tactus/adapters/cli_log.py +223 -0
- tactus/adapters/cost_collector_log.py +56 -0
- tactus/adapters/file_storage.py +367 -0
- tactus/adapters/http_callback_log.py +109 -0
- tactus/adapters/ide_log.py +71 -0
- tactus/adapters/lua_tools.py +336 -0
- tactus/adapters/mcp.py +289 -0
- tactus/adapters/mcp_manager.py +196 -0
- tactus/adapters/memory.py +53 -0
- tactus/adapters/plugins.py +419 -0
- tactus/backends/http_backend.py +58 -0
- tactus/backends/model_backend.py +35 -0
- tactus/backends/pytorch_backend.py +110 -0
- tactus/broker/__init__.py +12 -0
- tactus/broker/client.py +247 -0
- tactus/broker/protocol.py +183 -0
- tactus/broker/server.py +1123 -0
- tactus/broker/stdio.py +12 -0
- tactus/cli/__init__.py +7 -0
- tactus/cli/app.py +2245 -0
- tactus/cli/commands/__init__.py +0 -0
- tactus/core/__init__.py +32 -0
- tactus/core/config_manager.py +790 -0
- tactus/core/dependencies/__init__.py +14 -0
- tactus/core/dependencies/registry.py +180 -0
- tactus/core/dsl_stubs.py +2117 -0
- tactus/core/exceptions.py +66 -0
- tactus/core/execution_context.py +480 -0
- tactus/core/lua_sandbox.py +508 -0
- tactus/core/message_history_manager.py +236 -0
- tactus/core/mocking.py +286 -0
- tactus/core/output_validator.py +291 -0
- tactus/core/registry.py +499 -0
- tactus/core/runtime.py +2907 -0
- tactus/core/template_resolver.py +142 -0
- tactus/core/yaml_parser.py +301 -0
- tactus/docker/Dockerfile +61 -0
- tactus/docker/entrypoint.sh +69 -0
- tactus/dspy/__init__.py +39 -0
- tactus/dspy/agent.py +1144 -0
- tactus/dspy/broker_lm.py +181 -0
- tactus/dspy/config.py +212 -0
- tactus/dspy/history.py +196 -0
- tactus/dspy/module.py +405 -0
- tactus/dspy/prediction.py +318 -0
- tactus/dspy/signature.py +185 -0
- tactus/formatting/__init__.py +7 -0
- tactus/formatting/formatter.py +437 -0
- tactus/ide/__init__.py +9 -0
- tactus/ide/coding_assistant.py +343 -0
- tactus/ide/server.py +2223 -0
- tactus/primitives/__init__.py +49 -0
- tactus/primitives/control.py +168 -0
- tactus/primitives/file.py +229 -0
- tactus/primitives/handles.py +378 -0
- tactus/primitives/host.py +94 -0
- tactus/primitives/human.py +342 -0
- tactus/primitives/json.py +189 -0
- tactus/primitives/log.py +187 -0
- tactus/primitives/message_history.py +157 -0
- tactus/primitives/model.py +163 -0
- tactus/primitives/procedure.py +564 -0
- tactus/primitives/procedure_callable.py +318 -0
- tactus/primitives/retry.py +155 -0
- tactus/primitives/session.py +152 -0
- tactus/primitives/state.py +182 -0
- tactus/primitives/step.py +209 -0
- tactus/primitives/system.py +93 -0
- tactus/primitives/tool.py +375 -0
- tactus/primitives/tool_handle.py +279 -0
- tactus/primitives/toolset.py +229 -0
- tactus/protocols/__init__.py +38 -0
- tactus/protocols/chat_recorder.py +81 -0
- tactus/protocols/config.py +97 -0
- tactus/protocols/cost.py +31 -0
- tactus/protocols/hitl.py +71 -0
- tactus/protocols/log_handler.py +27 -0
- tactus/protocols/models.py +355 -0
- tactus/protocols/result.py +33 -0
- tactus/protocols/storage.py +90 -0
- tactus/providers/__init__.py +13 -0
- tactus/providers/base.py +92 -0
- tactus/providers/bedrock.py +117 -0
- tactus/providers/google.py +105 -0
- tactus/providers/openai.py +98 -0
- tactus/sandbox/__init__.py +63 -0
- tactus/sandbox/config.py +171 -0
- tactus/sandbox/container_runner.py +1099 -0
- tactus/sandbox/docker_manager.py +433 -0
- tactus/sandbox/entrypoint.py +227 -0
- tactus/sandbox/protocol.py +213 -0
- tactus/stdlib/__init__.py +10 -0
- tactus/stdlib/io/__init__.py +13 -0
- tactus/stdlib/io/csv.py +88 -0
- tactus/stdlib/io/excel.py +136 -0
- tactus/stdlib/io/file.py +90 -0
- tactus/stdlib/io/fs.py +154 -0
- tactus/stdlib/io/hdf5.py +121 -0
- tactus/stdlib/io/json.py +109 -0
- tactus/stdlib/io/parquet.py +83 -0
- tactus/stdlib/io/tsv.py +88 -0
- tactus/stdlib/loader.py +274 -0
- tactus/stdlib/tac/tactus/tools/done.tac +33 -0
- tactus/stdlib/tac/tactus/tools/log.tac +50 -0
- tactus/testing/README.md +273 -0
- tactus/testing/__init__.py +61 -0
- tactus/testing/behave_integration.py +380 -0
- tactus/testing/context.py +486 -0
- tactus/testing/eval_models.py +114 -0
- tactus/testing/evaluation_runner.py +222 -0
- tactus/testing/evaluators.py +634 -0
- tactus/testing/events.py +94 -0
- tactus/testing/gherkin_parser.py +134 -0
- tactus/testing/mock_agent.py +315 -0
- tactus/testing/mock_dependencies.py +234 -0
- tactus/testing/mock_hitl.py +171 -0
- tactus/testing/mock_registry.py +168 -0
- tactus/testing/mock_tools.py +133 -0
- tactus/testing/models.py +115 -0
- tactus/testing/pydantic_eval_runner.py +508 -0
- tactus/testing/steps/__init__.py +13 -0
- tactus/testing/steps/builtin.py +902 -0
- tactus/testing/steps/custom.py +69 -0
- tactus/testing/steps/registry.py +68 -0
- tactus/testing/test_runner.py +489 -0
- tactus/tracing/__init__.py +5 -0
- tactus/tracing/trace_manager.py +417 -0
- tactus/utils/__init__.py +1 -0
- tactus/utils/cost_calculator.py +72 -0
- tactus/utils/model_pricing.py +132 -0
- tactus/utils/safe_file_library.py +502 -0
- tactus/utils/safe_libraries.py +234 -0
- tactus/validation/LuaLexerBase.py +66 -0
- tactus/validation/LuaParserBase.py +23 -0
- tactus/validation/README.md +224 -0
- tactus/validation/__init__.py +7 -0
- tactus/validation/error_listener.py +21 -0
- tactus/validation/generated/LuaLexer.interp +231 -0
- tactus/validation/generated/LuaLexer.py +5548 -0
- tactus/validation/generated/LuaLexer.tokens +124 -0
- tactus/validation/generated/LuaLexerBase.py +66 -0
- tactus/validation/generated/LuaParser.interp +173 -0
- tactus/validation/generated/LuaParser.py +6439 -0
- tactus/validation/generated/LuaParser.tokens +124 -0
- tactus/validation/generated/LuaParserBase.py +23 -0
- tactus/validation/generated/LuaParserVisitor.py +118 -0
- tactus/validation/generated/__init__.py +7 -0
- tactus/validation/grammar/LuaLexer.g4 +123 -0
- tactus/validation/grammar/LuaParser.g4 +178 -0
- tactus/validation/semantic_visitor.py +817 -0
- tactus/validation/validator.py +157 -0
- tactus-0.31.2.dist-info/METADATA +1809 -0
- tactus-0.31.2.dist-info/RECORD +160 -0
- tactus-0.31.2.dist-info/WHEEL +4 -0
- tactus-0.31.2.dist-info/entry_points.txt +2 -0
- tactus-0.31.2.dist-info/licenses/LICENSE +21 -0
tactus/adapters/file_storage.py
@@ -0,0 +1,367 @@
+"""
+File-based storage backend for Tactus.
+
+Stores procedure metadata and execution log as JSON files on disk.
+"""
+
+import json
+from pathlib import Path
+from typing import Any, Optional, Dict, List
+from datetime import datetime
+
+from tactus.protocols.models import (
+    ProcedureMetadata,
+    CheckpointEntry,
+    ExecutionRun,
+    Breakpoint,
+    SourceLocation,
+)
+
+
+class FileStorage:
+    """
+    File-based storage backend.
+
+    Stores each procedure's metadata in a separate JSON file:
+    {storage_dir}/{procedure_id}.json
+    """
+
+    def __init__(self, storage_dir: str = "~/.tactus/storage"):
+        """
+        Initialize file storage.
+
+        Args:
+            storage_dir: Directory to store procedure files
+        """
+        self.storage_dir = Path(storage_dir).expanduser()
+
+        # Try to create directories, but don't fail if we can't
+        # This allows read-only testing and defers errors to write operations
+        try:
+            self.storage_dir.mkdir(parents=True, exist_ok=True)
+        except (PermissionError, OSError):
+            pass  # Defer error to actual write operations
+
+        # Create subdirectories for tracing
+        self.traces_dir = self.storage_dir / "traces"
+        self.runs_dir = self.traces_dir / "runs"
+        self.breakpoints_dir = self.traces_dir / "breakpoints"
+        self.index_file = self.traces_dir / "index.json"
+
+        # Try to create subdirectories, but defer errors to write operations
+        try:
+            self.traces_dir.mkdir(parents=True, exist_ok=True)
+            self.runs_dir.mkdir(parents=True, exist_ok=True)
+            self.breakpoints_dir.mkdir(parents=True, exist_ok=True)
+        except (PermissionError, OSError):
+            pass  # Defer error to actual write operations
+
+    def _get_file_path(self, procedure_id: str) -> Path:
+        """Get the file path for a procedure."""
+        return self.storage_dir / f"{procedure_id}.json"
+
+    def _read_file(self, procedure_id: str) -> dict:
+        """Read procedure file, return empty dict if not found."""
+        file_path = self._get_file_path(procedure_id)
+        if not file_path.exists():
+            return {}
+
+        try:
+            with open(file_path, "r") as f:
+                return json.load(f)
+        except (json.JSONDecodeError, IOError) as e:
+            raise RuntimeError(f"Failed to read procedure file {file_path}: {e}")
+
+    def _write_file(self, procedure_id: str, data: dict) -> None:
+        """Write procedure data to file."""
+        file_path = self._get_file_path(procedure_id)
+
+        try:
+            with open(file_path, "w") as f:
+                json.dump(data, f, indent=2, default=str)
+        except (IOError, OSError) as e:
+            raise RuntimeError(f"Failed to write procedure file {file_path}: {e}")
+
+    def load_procedure_metadata(self, procedure_id: str) -> ProcedureMetadata:
+        """Load procedure metadata from file."""
+        data = self._read_file(procedure_id)
+
+        if not data:
+            # Create new metadata
+            return ProcedureMetadata(procedure_id=procedure_id)
+
+        # Convert stored execution log back to CheckpointEntry objects
+        execution_log = []
+        for entry_data in data.get("execution_log", []):
+            # Rebuild SourceLocation if present
+            source_location = None
+            if entry_data.get("source_location"):
+                source_location = SourceLocation(**entry_data["source_location"])
+
+            execution_log.append(
+                CheckpointEntry(
+                    position=entry_data["position"],
+                    type=entry_data["type"],
+                    result=entry_data["result"],
+                    timestamp=datetime.fromisoformat(entry_data["timestamp"]),
+                    duration_ms=entry_data.get("duration_ms"),
+                    input_hash=entry_data.get("input_hash"),
+                    run_id=entry_data["run_id"],
+                    source_location=source_location,
+                    captured_vars=entry_data.get("captured_vars"),
+                )
+            )
+
+        return ProcedureMetadata(
+            procedure_id=procedure_id,
+            execution_log=execution_log,
+            replay_index=data.get("replay_index", 0),
+            state=data.get("state", {}),
+            lua_state=data.get("lua_state", {}),
+            status=data.get("status", "RUNNING"),
+            waiting_on_message_id=data.get("waiting_on_message_id"),
+        )
+
+    def save_procedure_metadata(self, procedure_id: str, metadata: ProcedureMetadata) -> None:
+        """Save procedure metadata to file."""
+        # Convert to serializable dict
+        data = {
+            "procedure_id": metadata.procedure_id,
+            "execution_log": [
+                {
+                    "position": entry.position,
+                    "type": entry.type,
+                    "result": entry.result,
+                    "timestamp": entry.timestamp.isoformat(),
+                    "duration_ms": entry.duration_ms,
+                    "input_hash": entry.input_hash,
+                    "run_id": entry.run_id,
+                    "source_location": (
+                        entry.source_location.model_dump() if entry.source_location else None
+                    ),
+                    "captured_vars": entry.captured_vars,
+                }
+                for entry in metadata.execution_log
+            ],
+            "replay_index": metadata.replay_index,
+            "state": metadata.state,
+            "lua_state": metadata.lua_state,
+            "status": metadata.status,
+            "waiting_on_message_id": metadata.waiting_on_message_id,
+        }
+
+        self._write_file(procedure_id, data)
+
+    def update_procedure_status(
+        self, procedure_id: str, status: str, waiting_on_message_id: Optional[str] = None
+    ) -> None:
+        """Update procedure status."""
+        metadata = self.load_procedure_metadata(procedure_id)
+        metadata.status = status
+        metadata.waiting_on_message_id = waiting_on_message_id
+        self.save_procedure_metadata(procedure_id, metadata)
+
+    def get_state(self, procedure_id: str) -> Dict[str, Any]:
+        """Get mutable state dictionary."""
+        metadata = self.load_procedure_metadata(procedure_id)
+        return metadata.state
+
+    def set_state(self, procedure_id: str, state: Dict[str, Any]) -> None:
+        """Set mutable state dictionary."""
+        metadata = self.load_procedure_metadata(procedure_id)
+        metadata.state = state
+        self.save_procedure_metadata(procedure_id, metadata)
+
+    # Tracing & Debugging Methods
+
+    def _load_index(self) -> Dict[str, Any]:
+        """Load the run index."""
+        if not self.index_file.exists():
+            return {}
+
+        try:
+            with open(self.index_file, "r") as f:
+                return json.load(f)
+        except (json.JSONDecodeError, IOError):
+            return {}
+
+    def _save_index(self, index: Dict[str, Any]) -> None:
+        """Save the run index."""
+        try:
+            with open(self.index_file, "w") as f:
+                json.dump(index, f, indent=2, default=str)
+        except (IOError, OSError) as e:
+            raise RuntimeError(f"Failed to write index file: {e}")
+
+    def _update_index(self, run: ExecutionRun) -> None:
+        """Update index with run metadata."""
+        index = self._load_index()
+
+        index[run.run_id] = {
+            "procedure_name": run.procedure_name,
+            "file_path": run.file_path,
+            "start_time": run.start_time.isoformat(),
+            "status": run.status,
+        }
+
+        self._save_index(index)
+
+    def save_run(self, run: ExecutionRun) -> None:
+        """
+        Save complete run data.
+
+        Args:
+            run: Execution run to save
+        """
+        run_path = self.runs_dir / f"{run.run_id}.json"
+
+        # Convert to dict with proper serialization
+        data = run.model_dump()
+
+        # Convert datetime objects to ISO strings
+        if isinstance(data.get("start_time"), datetime):
+            data["start_time"] = data["start_time"].isoformat()
+        if data.get("end_time") and isinstance(data.get("end_time"), datetime):
+            data["end_time"] = data["end_time"].isoformat()
+
+        # Convert checkpoint timestamps
+        for checkpoint in data.get("execution_log", []):
+            if isinstance(checkpoint.get("timestamp"), datetime):
+                checkpoint["timestamp"] = checkpoint["timestamp"].isoformat()
+
+        try:
+            with open(run_path, "w") as f:
+                json.dump(data, f, indent=2, default=str)
+        except (IOError, OSError) as e:
+            raise RuntimeError(f"Failed to save run {run.run_id}: {e}")
+
+        # Update index
+        self._update_index(run)
+
+    def load_run(self, run_id: str) -> ExecutionRun:
+        """
+        Load complete run data.
+
+        Args:
+            run_id: Run identifier
+
+        Returns:
+            Execution run
+
+        Raises:
+            FileNotFoundError: If run not found
+        """
+        run_path = self.runs_dir / f"{run_id}.json"
+
+        if not run_path.exists():
+            raise FileNotFoundError(f"Run {run_id} not found")
+
+        try:
+            with open(run_path, "r") as f:
+                data = json.load(f)
+        except (json.JSONDecodeError, IOError) as e:
+            raise RuntimeError(f"Failed to load run {run_id}: {e}")
+
+        # Convert timestamps back to datetime objects
+        if data.get("start_time"):
+            data["start_time"] = datetime.fromisoformat(data["start_time"])
+        if data.get("end_time"):
+            data["end_time"] = datetime.fromisoformat(data["end_time"])
+
+        # Convert checkpoint timestamps and rebuild CheckpointEntry objects
+        execution_log = []
+        for cp_data in data.get("execution_log", []):
+            if cp_data.get("timestamp"):
+                cp_data["timestamp"] = datetime.fromisoformat(cp_data["timestamp"])
+
+            # Rebuild SourceLocation if present
+            if cp_data.get("source_location"):
+                cp_data["source_location"] = SourceLocation(**cp_data["source_location"])
+
+            execution_log.append(CheckpointEntry(**cp_data))
+
+        data["execution_log"] = execution_log
+
+        # Rebuild Breakpoint objects
+        breakpoints = []
+        for bp_data in data.get("breakpoints", []):
+            breakpoints.append(Breakpoint(**bp_data))
+
+        data["breakpoints"] = breakpoints
+
+        return ExecutionRun(**data)
+
+    def list_runs(self, procedure_name: Optional[str] = None) -> List[ExecutionRun]:
+        """
+        List all runs, optionally filtered by procedure name.
+
+        Args:
+            procedure_name: Optional procedure name filter
+
+        Returns:
+            List of execution runs, sorted by start time (newest first)
+        """
+        index = self._load_index()
+
+        # Filter by procedure name if specified
+        if procedure_name:
+            run_ids = [
+                rid for rid, info in index.items() if info.get("procedure_name") == procedure_name
+            ]
+        else:
+            run_ids = list(index.keys())
+
+        # Load all matching runs
+        runs = []
+        for run_id in run_ids:
+            try:
+                runs.append(self.load_run(run_id))
+            except (FileNotFoundError, RuntimeError):
+                # Skip corrupted or missing runs
+                continue
+
+        # Sort by start time (newest first)
+        runs.sort(key=lambda r: r.start_time, reverse=True)
+
+        return runs
+
+    def save_breakpoints(self, procedure_name: str, breakpoints: List[Breakpoint]) -> None:
+        """
+        Save breakpoints for a procedure.
+
+        Args:
+            procedure_name: Procedure name
+            breakpoints: List of breakpoints
+        """
+        bp_path = self.breakpoints_dir / f"{procedure_name}.json"
+
+        data = [bp.model_dump() for bp in breakpoints]
+
+        try:
+            with open(bp_path, "w") as f:
+                json.dump(data, f, indent=2)
+        except (IOError, OSError) as e:
+            raise RuntimeError(f"Failed to save breakpoints for {procedure_name}: {e}")
+
+    def load_breakpoints(self, procedure_name: str) -> List[Breakpoint]:
+        """
+        Load breakpoints for a procedure.
+
+        Args:
+            procedure_name: Procedure name
+
+        Returns:
+            List of breakpoints
+        """
+        bp_path = self.breakpoints_dir / f"{procedure_name}.json"
+
+        if not bp_path.exists():
+            return []
+
+        try:
+            with open(bp_path, "r") as f:
+                data = json.load(f)
+        except (json.JSONDecodeError, IOError):
+            return []
+
+        return [Breakpoint(**bp_data) for bp_data in data]
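
The storage API above can be exercised directly. A minimal sketch of the state round-trip, using only methods defined in this file (the storage directory is illustrative):

    from tactus.adapters.file_storage import FileStorage

    # Any writable path works; this one is hypothetical.
    storage = FileStorage(storage_dir="/tmp/tactus-demo")

    # set_state() loads the procedure's JSON file, swaps in the new state
    # dict, and rewrites the file in full.
    storage.set_state("proc-123", {"step": 1, "items_done": 4})

    # get_state() reads the same file back via load_procedure_metadata().
    assert storage.get_state("proc-123")["step"] == 1

    # An unknown procedure_id yields fresh ProcedureMetadata, not an error.
    meta = storage.load_procedure_metadata("never-seen")

Note that each get_state()/set_state() call re-reads and rewrites the entire JSON file, so the backend trades throughput for simplicity.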
tactus/adapters/http_callback_log.py
@@ -0,0 +1,109 @@
+"""
+HTTP Callback Log Handler for container event streaming.
+
+Posts log events to a callback URL for real-time streaming from containers.
+Used when TACTUS_CALLBACK_URL environment variable is set.
+"""
+
+import logging
+import os
+from typing import Optional, List
+
+import requests
+from requests.adapters import HTTPAdapter
+from urllib3.util.retry import Retry
+
+from tactus.protocols.models import LogEvent, CostEvent
+
+logger = logging.getLogger(__name__)
+
+
+class HTTPCallbackLogHandler:
+    """
+    Log handler that POSTs events to an HTTP callback URL.
+
+    Used inside Docker containers to stream events back to the IDE backend.
+    The callback URL is provided via TACTUS_CALLBACK_URL environment variable.
+    """
+
+    def __init__(
+        self,
+        callback_url: str,
+        timeout: float = 5.0,
+        max_retries: int = 3,
+    ):
+        """
+        Initialize HTTP callback log handler.
+
+        Args:
+            callback_url: URL to POST events to
+            timeout: Request timeout in seconds
+            max_retries: Number of retries on failure
+        """
+        self.callback_url = callback_url
+        self.timeout = timeout
+        self.cost_events: List[CostEvent] = []  # Track cost events for aggregation
+
+        # Setup session with retry logic
+        self.session = requests.Session()
+        retry_strategy = Retry(
+            total=max_retries,
+            backoff_factor=0.1,
+            status_forcelist=[429, 500, 502, 503, 504],
+        )
+        adapter = HTTPAdapter(max_retries=retry_strategy)
+        self.session.mount("http://", adapter)
+        self.session.mount("https://", adapter)
+
+        logger.info(f"[HTTP_CALLBACK] Initialized with URL: {callback_url}")
+
+    def log(self, event: LogEvent) -> None:
+        """
+        POST log event to callback URL.
+
+        Args:
+            event: Structured log event
+        """
+        # Track cost events for aggregation (mirrors IDELogHandler behavior)
+        if isinstance(event, CostEvent):
+            self.cost_events.append(event)
+
+        try:
+            # Serialize event to JSON
+            event_dict = event.model_dump(mode="json")
+
+            # Format timestamp to ensure ISO format with Z suffix
+            iso_string = event.timestamp.isoformat()
+            if not (iso_string.endswith("Z") or "+" in iso_string or iso_string.count("-") > 2):
+                iso_string += "Z"
+            event_dict["timestamp"] = iso_string
+
+            # POST to callback URL
+            response = self.session.post(
+                self.callback_url,
+                json=event_dict,
+                timeout=self.timeout,
+            )
+            response.raise_for_status()
+            logger.debug(f"[HTTP_CALLBACK] Event posted: type={event.event_type}")
+
+        except requests.exceptions.RequestException as e:
+            # Log but don't fail - event streaming is best-effort
+            logger.warning(f"[HTTP_CALLBACK] Failed to POST event to {self.callback_url}: {e}")
+        except Exception as e:
+            # Catch any other errors to prevent crashing the procedure
+            logger.warning(f"[HTTP_CALLBACK] Unexpected error posting event: {e}")
+
+    @classmethod
+    def from_environment(cls) -> Optional["HTTPCallbackLogHandler"]:
+        """
+        Create handler from TACTUS_CALLBACK_URL environment variable.
+
+        Returns:
+            HTTPCallbackLogHandler if env var is set, None otherwise.
+        """
+        callback_url = os.environ.get("TACTUS_CALLBACK_URL")
+        if callback_url:
+            logger.info(f"[HTTP_CALLBACK] Creating handler from environment: {callback_url}")
+            return cls(callback_url=callback_url)
+        return None
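
The retry wiring in __init__ is the standard requests/urllib3 pattern, shown in isolation below; the exact sleep schedule between attempts depends on the installed urllib3 version:

    import requests
    from requests.adapters import HTTPAdapter
    from urllib3.util.retry import Retry

    session = requests.Session()
    retries = Retry(
        total=3,                                     # up to 3 retries per request
        backoff_factor=0.1,                          # short exponential backoff
        status_forcelist=[429, 500, 502, 503, 504],  # retry throttling/server errors only
    )
    session.mount("http://", HTTPAdapter(max_retries=retries))
    session.mount("https://", HTTPAdapter(max_retries=retries))

Because log() swallows RequestException once retries are exhausted, an unreachable callback endpoint slows each event by at most a few timeouts' worth of retries but never fails the procedure.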
tactus/adapters/ide_log.py
@@ -0,0 +1,71 @@
+"""
+IDE Log Handler for event collection and streaming.
+
+Collects log events in a queue for streaming to IDE frontend.
+"""
+
+import logging
+import queue
+from typing import List
+
+from tactus.protocols.models import LogEvent
+
+logger = logging.getLogger(__name__)
+
+
+class IDELogHandler:
+    """
+    IDE log handler that collects events for streaming.
+
+    Receives structured log events and stores them in a queue
+    for retrieval and streaming to the IDE frontend.
+    """
+
+    def __init__(self):
+        """Initialize IDE log handler."""
+        self.events = queue.Queue()
+        self.cost_events = []  # Track cost events for aggregation
+        logger.debug("IDELogHandler initialized")
+
+    def log(self, event: LogEvent) -> None:
+        """
+        Collect log event for streaming.
+
+        Args:
+            event: Structured log event
+        """
+        # Track cost events for aggregation
+        from tactus.protocols.models import CostEvent, AgentStreamChunkEvent
+
+        if isinstance(event, CostEvent):
+            self.cost_events.append(event)
+
+        # Debug logging for streaming events
+        if isinstance(event, AgentStreamChunkEvent):
+            logger.info(
+                f"[IDE_LOG] Received AgentStreamChunkEvent: agent={event.agent_name}, chunk_len={len(event.chunk_text)}, accumulated_len={len(event.accumulated_text)}"
+            )
+
+        self.events.put(event)
+        logger.debug(
+            f"[IDE_LOG] Event queued: type={type(event).__name__}, queue_size={self.events.qsize()}"
+        )
+
+    def get_events(self, timeout: float = 0.1) -> List[LogEvent]:
+        """
+        Get all available events from the queue.
+
+        Args:
+            timeout: Timeout for queue.get() in seconds
+
+        Returns:
+            List of LogEvent objects
+        """
+        events = []
+        while True:
+            try:
+                event = self.events.get(timeout=timeout)
+                events.append(event)
+            except queue.Empty:
+                break
+        return events
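
A toy producer/consumer run of the queue-based handler. FakeEvent is a demo stand-in: log() only isinstance-checks its argument, so any object can flow through, though real callers pass LogEvent models from tactus.protocols.models:

    import threading
    import time

    from tactus.adapters.ide_log import IDELogHandler

    class FakeEvent:
        """Stand-in for a real LogEvent; log() only type-checks events."""

    handler = IDELogHandler()

    def producer():
        for _ in range(3):
            handler.log(FakeEvent())  # runtime side: called as events occur
            time.sleep(0.01)

    threading.Thread(target=producer).start()
    time.sleep(0.1)

    # Server side: drain whatever accumulated. Each internal get() waits up
    # to `timeout`, so draining an empty queue costs one timeout before
    # returning.
    batch = handler.get_events(timeout=0.1)
    print(len(batch))  # 3 once the producer has finished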