synth-ai 0.2.13.dev2__py3-none-any.whl → 0.2.14__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of synth-ai might be problematic. Click here for more details.
- examples/multi_step/configs/README_verilog_rl.md +77 -0
- examples/multi_step/configs/VERILOG_REWARDS.md +90 -0
- examples/multi_step/configs/VERILOG_RL_CHECKLIST.md +183 -0
- examples/multi_step/configs/crafter_eval_synth_qwen4b.toml +35 -0
- examples/multi_step/configs/crafter_eval_text_only_groq_qwen32b.toml +36 -0
- examples/multi_step/configs/crafter_rl_stepwise_hosted_judge.toml +5 -4
- examples/multi_step/configs/crafter_synth_backend.md +40 -0
- examples/multi_step/configs/verilog_eval_groq_qwen32b.toml +31 -0
- examples/multi_step/configs/verilog_eval_synth_qwen8b.toml +33 -0
- examples/multi_step/configs/verilog_rl_lora.toml +190 -0
- examples/multi_step/judges/crafter_backend_judge.py +220 -0
- examples/multi_step/judges/verilog_backend_judge.py +234 -0
- examples/multi_step/readme.md +48 -0
- examples/multi_step/verilog_rl_lora.md +218 -0
- examples/qwen_coder/configs/coder_lora_30b.toml +1 -1
- examples/sft/evaluate.py +2 -0
- examples/sft/generate_traces.py +2 -0
- examples/swe/task_app/grpo_swe_mini.py +1 -0
- examples/swe/task_app/hosted/rollout.py +2 -0
- examples/task_apps/IMAGE_ONLY_EVAL_QUICKSTART.md +258 -0
- examples/task_apps/crafter/CREATE_SFT_DATASET.md +273 -0
- examples/task_apps/crafter/EVAL_IMAGE_ONLY_RESULTS.md +152 -0
- examples/task_apps/crafter/FILTER_COMMAND_STATUS.md +174 -0
- examples/task_apps/crafter/FILTER_COMMAND_SUCCESS.md +268 -0
- examples/task_apps/crafter/QUERY_EXAMPLES.md +203 -0
- examples/task_apps/crafter/README_IMAGE_ONLY_EVAL.md +316 -0
- examples/task_apps/crafter/eval_image_only_gpt4o.toml +28 -0
- examples/task_apps/crafter/eval_text_only_groq_llama.toml +36 -0
- examples/task_apps/crafter/filter_sft_dataset.toml +16 -0
- examples/task_apps/crafter/task_app/__init__.py +3 -0
- examples/task_apps/crafter/task_app/grpo_crafter.py +306 -8
- examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/environment.py +10 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/policy.py +16 -3
- examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/react_agent.py +17 -2
- examples/task_apps/crafter/task_app/synth_envs_hosted/inference/openai_client.py +25 -3
- examples/task_apps/crafter/task_app/synth_envs_hosted/policy_routes.py +52 -1
- examples/task_apps/crafter/task_app/synth_envs_hosted/rollout.py +111 -13
- examples/task_apps/crafter/task_app/synth_envs_hosted/utils.py +156 -0
- examples/task_apps/enron/filter_sft.toml +5 -0
- examples/task_apps/enron/tests/__init__.py +2 -0
- examples/task_apps/enron/tests/integration/__init__.py +2 -0
- examples/task_apps/enron/tests/integration/test_enron_eval.py +2 -0
- examples/task_apps/enron/tests/unit/__init__.py +2 -0
- examples/task_apps/pokemon_red/EVAL_IMAGE_ONLY_COMPLETE.md +283 -0
- examples/task_apps/pokemon_red/EVAL_IMAGE_ONLY_STATUS.md +155 -0
- examples/task_apps/pokemon_red/README_IMAGE_ONLY_EVAL.md +415 -0
- examples/task_apps/pokemon_red/eval_image_only_gpt4o.toml +29 -0
- examples/task_apps/pokemon_red/pallet_town_rl_config.toml +2 -0
- examples/task_apps/pokemon_red/task_app.py +199 -6
- examples/task_apps/pokemon_red/test_pallet_town_rewards.py +2 -0
- examples/task_apps/sokoban/filter_sft.toml +5 -0
- examples/task_apps/sokoban/tests/__init__.py +2 -0
- examples/task_apps/sokoban/tests/integration/__init__.py +2 -0
- examples/task_apps/sokoban/tests/unit/__init__.py +2 -0
- examples/task_apps/verilog/eval_groq_qwen32b.toml +8 -4
- examples/task_apps/verilog/filter_sft.toml +5 -0
- examples/task_apps/verilog/task_app/grpo_verilog.py +258 -23
- examples/task_apps/verilog/tests/__init__.py +2 -0
- examples/task_apps/verilog/tests/integration/__init__.py +2 -0
- examples/task_apps/verilog/tests/integration/test_verilog_eval.py +2 -0
- examples/task_apps/verilog/tests/unit/__init__.py +2 -0
- examples/warming_up_to_rl/groq_test.py +2 -0
- examples/warming_up_to_rl/run_local_rollout.py +2 -0
- examples/warming_up_to_rl/run_local_rollout_modal.py +2 -0
- examples/warming_up_to_rl/run_local_rollout_parallel.py +2 -0
- examples/warming_up_to_rl/run_local_rollout_traced.py +2 -0
- examples/warming_up_to_rl/run_rollout_remote.py +2 -0
- synth_ai/api/models/supported.py +1 -0
- synth_ai/cli/__init__.py +46 -13
- synth_ai/cli/_modal_wrapper.py +3 -2
- synth_ai/cli/recent.py +1 -1
- synth_ai/cli/status.py +1 -1
- synth_ai/cli/task_apps.py +354 -143
- synth_ai/cli/traces.py +1 -1
- synth_ai/cli/tui.py +57 -0
- synth_ai/cli/turso.py +1 -1
- synth_ai/cli/watch.py +1 -1
- synth_ai/demos/demo_task_apps/crafter/grpo_crafter_task_app.py +1 -1
- synth_ai/environments/examples/crafter_classic/environment.py +1 -1
- synth_ai/environments/examples/verilog/engine.py +76 -10
- synth_ai/judge_schemas.py +8 -8
- synth_ai/task/__init__.py +11 -1
- synth_ai/task/apps/__init__.py +1 -0
- synth_ai/task/config.py +257 -0
- synth_ai/task/contracts.py +15 -2
- synth_ai/task/rubrics/__init__.py +3 -0
- synth_ai/task/rubrics/loaders.py +22 -3
- synth_ai/task/rubrics/scoring.py +3 -0
- synth_ai/task/trace_correlation_helpers.py +315 -0
- synth_ai/task/validators.py +144 -0
- synth_ai/tracing_v3/abstractions.py +3 -3
- synth_ai/tracing_v3/llm_call_record_helpers.py +5 -5
- synth_ai/tracing_v3/session_tracer.py +16 -6
- synth_ai/tracing_v3/storage/base.py +29 -29
- synth_ai/tracing_v3/storage/config.py +3 -3
- synth_ai/tracing_v3/turso/daemon.py +8 -7
- synth_ai/tracing_v3/turso/native_manager.py +63 -40
- synth_ai/tracing_v3/utils.py +3 -3
- synth_ai/tui/__init__.py +5 -0
- synth_ai/tui/__main__.py +13 -0
- synth_ai/tui/cli/__init__.py +1 -0
- synth_ai/tui/cli/query_experiments.py +164 -0
- synth_ai/tui/cli/query_experiments_v3.py +164 -0
- synth_ai/tui/dashboard.py +906 -0
- {synth_ai-0.2.13.dev2.dist-info → synth_ai-0.2.14.dist-info}/METADATA +1 -1
- {synth_ai-0.2.13.dev2.dist-info → synth_ai-0.2.14.dist-info}/RECORD +110 -71
- {synth_ai-0.2.13.dev2.dist-info → synth_ai-0.2.14.dist-info}/WHEEL +0 -0
- {synth_ai-0.2.13.dev2.dist-info → synth_ai-0.2.14.dist-info}/entry_points.txt +0 -0
- {synth_ai-0.2.13.dev2.dist-info → synth_ai-0.2.14.dist-info}/licenses/LICENSE +0 -0
- {synth_ai-0.2.13.dev2.dist-info → synth_ai-0.2.14.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,906 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Interactive TUI Dashboard for Synth AI experiments.
|
|
4
|
+
|
|
5
|
+
Launch with: python -m synth_ai.tui.dashboard
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import logging
|
|
9
|
+
import os
|
|
10
|
+
from datetime import datetime
|
|
11
|
+
from urllib.parse import urlparse
|
|
12
|
+
|
|
13
|
+
# Import textual components with graceful fallback
|
|
14
|
+
try:
|
|
15
|
+
from textual import on
|
|
16
|
+
from textual.app import App, ComposeResult
|
|
17
|
+
from textual.binding import Binding
|
|
18
|
+
from textual.containers import Container
|
|
19
|
+
from textual.reactive import reactive
|
|
20
|
+
from textual.timer import Timer
|
|
21
|
+
from textual.widgets import (
|
|
22
|
+
DataTable,
|
|
23
|
+
Footer,
|
|
24
|
+
Header,
|
|
25
|
+
Static,
|
|
26
|
+
)
|
|
27
|
+
_TEXTUAL_AVAILABLE = True
|
|
28
|
+
except (ImportError, ModuleNotFoundError):
|
|
29
|
+
# Textual not available - provide dummy classes for type checking
|
|
30
|
+
on = None # type: ignore
|
|
31
|
+
App = object # type: ignore
|
|
32
|
+
ComposeResult = object # type: ignore
|
|
33
|
+
Binding = object # type: ignore
|
|
34
|
+
Container = object # type: ignore
|
|
35
|
+
reactive = lambda x: x # type: ignore
|
|
36
|
+
Timer = object # type: ignore
|
|
37
|
+
DataTable = object # type: ignore
|
|
38
|
+
Footer = object # type: ignore
|
|
39
|
+
Header = object # type: ignore
|
|
40
|
+
Static = object # type: ignore
|
|
41
|
+
_TEXTUAL_AVAILABLE = False
|
|
42
|
+
|
|
43
|
+
# Import database manager with graceful fallback
|
|
44
|
+
try:
|
|
45
|
+
from synth_ai.tracing_v3.turso.native_manager import NativeLibsqlTraceManager # type: ignore[import-untyped]
|
|
46
|
+
_DB_AVAILABLE = True
|
|
47
|
+
except (ImportError, ModuleNotFoundError, TypeError):
|
|
48
|
+
# Database manager not available - provide dummy class
|
|
49
|
+
NativeLibsqlTraceManager = object # type: ignore
|
|
50
|
+
_DB_AVAILABLE = False
|
|
51
|
+
|
|
52
|
+
import asyncio
|
|
53
|
+
import requests
|
|
54
|
+
from datetime import timedelta
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
class ExperimentRow:
    """One experiment's aggregate stats, formatted for the experiments table."""

    def __init__(
        self,
        exp_id: str,
        name: str,
        description: str,
        created_at: datetime,
        sessions: int,
        events: int,
        messages: int,
        cost: float,
        tokens: int,
    ):
        self.exp_id = exp_id
        # Fall back to placeholder/empty text when fields are falsy.
        self.name = name if name else "Unnamed"
        self.description = description if description else ""
        self.created_at = created_at
        self.sessions = sessions
        self.events = events
        self.messages = messages
        self.cost = cost
        self.tokens = tokens

    def to_row(self) -> list[str]:
        """Return the list of cell strings for one table row."""
        cells = [
            self.exp_id[:8],   # shortened ID
            self.name[:20],    # truncated name
        ]
        cells.extend(str(count) for count in (self.sessions, self.events, self.messages))
        cells.append(f"${self.cost:.4f}")
        cells.append(f"{self.tokens:,}")
        cells.append(self.created_at.strftime("%H:%M"))
        return cells
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
class ExperimentTable(DataTable):
    """Custom DataTable for experiments with refresh capability."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Row objects backing the table, index-aligned with displayed rows.
        self.experiments: list[ExperimentRow] = []
        # Remembered selection id; not updated by this class itself.
        self.selected_exp_id: str | None = None

    def setup_table(self):
        """Initialize table columns."""
        self.add_columns("ID", "Name", "Sessions", "Events", "Messages", "Cost", "Tokens", "Time")

    async def refresh_data(self, db_manager: NativeLibsqlTraceManager | None) -> None:
        """Refresh experiment data from database.

        Clears and repopulates both ``self.experiments`` and the visible
        rows. A ``None`` manager empties the table instead of querying.
        Query failures are logged and leave the table cleared.
        """
        if not db_manager:
            # Database not available, clear the table
            self.experiments.clear()
            self.clear()
            return

        try:
            # Get experiment list with stats using raw query.
            # COUNT(DISTINCT ...) is required because the three LEFT JOINs
            # multiply rows per experiment.
            # NOTE(review): total_cost divides SUM(ev.cost_usd) by 100.0,
            # which implies the column is stored in cents despite its name —
            # confirm against the tracing schema.
            # NOTE(review): query_traces appears to return a pandas-style
            # DataFrame (.empty / .iterrows()) — confirm against the manager API.
            df = await db_manager.query_traces("""
                SELECT
                    e.experiment_id,
                    e.name,
                    e.description,
                    e.created_at,
                    COUNT(DISTINCT st.session_id) as num_sessions,
                    COUNT(DISTINCT ev.id) as num_events,
                    COUNT(DISTINCT m.id) as num_messages,
                    SUM(CASE WHEN ev.event_type = 'cais' THEN ev.cost_usd ELSE 0 END) / 100.0 as total_cost,
                    SUM(CASE WHEN ev.event_type = 'cais' THEN ev.total_tokens ELSE 0 END) as total_tokens
                FROM experiments e
                LEFT JOIN session_traces st ON e.experiment_id = st.experiment_id
                LEFT JOIN events ev ON st.session_id = ev.session_id
                LEFT JOIN messages m ON st.session_id = m.session_id
                GROUP BY e.experiment_id, e.name, e.description, e.created_at
                ORDER BY e.created_at DESC
            """)

            self.experiments.clear()
            self.clear()

            if not df.empty:
                for _, row in df.iterrows():
                    # LEFT JOINs yield NULL aggregates for experiments with no
                    # sessions/events; coalesce to zero before converting.
                    exp_row = ExperimentRow(
                        exp_id=row["experiment_id"],
                        name=row["name"],
                        description=row["description"],
                        created_at=row["created_at"],
                        sessions=int(row["num_sessions"] or 0),
                        events=int(row["num_events"] or 0),
                        messages=int(row["num_messages"] or 0),
                        cost=float(row["total_cost"] or 0.0),
                        tokens=int(row["total_tokens"] or 0),
                    )
                    self.experiments.append(exp_row)
                    # Use the full experiment id as the row key so it stays
                    # unique even though only 8 chars are displayed.
                    self.add_row(*exp_row.to_row(), key=exp_row.exp_id)

        except Exception as e:
            logging.error(f"Failed to refresh experiments: {e}")

    def get_selected_experiment(self) -> ExperimentRow | None:
        """Get currently selected experiment."""
        # cursor_row indexes displayed rows, which mirror self.experiments.
        if self.cursor_row >= 0 and self.cursor_row < len(self.experiments):
            return self.experiments[self.cursor_row]
        return None
|
|
164
|
+
|
|
165
|
+
|
|
166
|
+
class ExperimentDetail(Static):
    """Detailed view of selected experiment."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Experiment currently shown, or None for the placeholder text.
        self.current_experiment: ExperimentRow | None = None

    def update_experiment(self, experiment: ExperimentRow | None):
        """Update the displayed experiment details.

        Passing ``None`` replaces the panel content with a prompt asking the
        user to select an experiment.
        """
        self.current_experiment = experiment
        if experiment:
            # Multi-line summary; .strip() removes the leading/trailing
            # newlines introduced by the triple-quoted template.
            details = f"""
🔬 **{experiment.name}**
ID: {experiment.exp_id}
Description: {experiment.description or "No description"}

📊 **Statistics**
Sessions: {experiment.sessions}
Events: {experiment.events}
Messages: {experiment.messages}
Cost: ${experiment.cost:.4f}
Tokens: {experiment.tokens:,}

🕒 **Created**: {experiment.created_at.strftime("%Y-%m-%d %H:%M:%S")}
""".strip()
        else:
            details = "Select an experiment to view details"

        self.update(details)
|
|
195
|
+
|
|
196
|
+
|
|
197
|
+
class DatabaseStatus(Static):
    """Display database connection status."""

    # Reactive fields re-render the widget whenever they are assigned.
    connection_status = reactive("🔴 Disconnected")
    db_info = reactive("")

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def render(self) -> str:
        """Return the single status line shown in the status bar."""
        status_line = f"Database: {self.connection_status}"
        if self.db_info:
            status_line += f" | {self.db_info}"
        return status_line

    def set_connected(self, url: str, db_name: str = ""):
        """Mark the connection as up, showing the db file name or host.

        Args:
            url: Database URL (e.g. ``sqlite+aiosqlite:///path/to.db``).
            db_name: Optional friendly database name, shown in brackets.
        """
        parsed = urlparse(url)
        if "sqlite" in url:
            # Extract just the filename for cleaner display
            from pathlib import Path
            try:
                path_part = url.split("///")[-1]
                filename = Path(path_part).name
                # BUG FIX: the computed filename was previously discarded and
                # the literal placeholder "(unknown)" was displayed instead.
                self.connection_status = f"🟢 {filename}"
            except Exception:
                # Malformed URL — fall back to a generic connected indicator.
                # (Was a bare `except:`, which also swallowed SystemExit etc.)
                self.connection_status = "🟢 Connected"
        else:
            host_info = f"{parsed.hostname}:{parsed.port}" if parsed.port else str(parsed.hostname)
            self.connection_status = f"🟢 {host_info}"

        if db_name:
            self.db_info = f"[{db_name}]"

    def set_disconnected(self, error: str = ""):
        """Mark the connection as down, optionally appending an error message."""
        error_text = f" - {error}" if error else ""
        self.connection_status = f"🔴 Disconnected{error_text}"
        self.db_info = ""

    def set_db_selector(self, current: int, total: int):
        """Show database selector info (current is a 0-based index)."""
        if total > 1:
            self.db_info = f"DB {current + 1}/{total} (n/p to switch)"
        else:
            self.db_info = ""
|
|
241
|
+
|
|
242
|
+
|
|
243
|
+
class BalanceStatus(Static):
    """Display balance and spending information (local + global)."""

    # Global (backend API)
    global_balance = reactive("$0.00")
    global_spend_24h = reactive("$0.00")
    global_spend_7d = reactive("$0.00")
    global_status = reactive("⏳")

    # Local (database)
    local_traces = reactive(0)
    local_cost = reactive("$0.00")
    local_tokens = reactive(0)
    local_tasks = reactive([])  # List of (task_name, count) tuples
    local_status = reactive("⏳")

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def _format_tokens(self) -> str:
        # Abbreviate large positive integer counts with K/M suffixes;
        # anything else (0, negatives, loading placeholders) renders as-is.
        count = self.local_tokens
        if not isinstance(count, int) or count <= 0:
            return str(count)
        if count >= 1_000_000:
            return f"{count / 1_000_000:.1f}M"
        if count >= 1_000:
            return f"{count / 1_000:.1f}K"
        return f"{count}"

    def _format_tasks(self) -> str:
        # " | task (n), task (n), task (n), +k" for the top three tasks.
        if not self.local_tasks:
            return ""
        shown = ", ".join(f"{name} ({count})" for name, count in self.local_tasks[:3])
        overflow = len(self.local_tasks) - 3
        suffix = f", +{overflow}" if overflow > 0 else ""
        return " | " + shown + suffix

    def render(self) -> str:
        """Build the compact two-line Local/Global summary."""
        tokens_str = self._format_tokens()
        tasks_str = self._format_tasks()
        local_line = (
            f"[b]Local[/b] {self.local_status} {self.local_traces} traces"
            f" | {self.local_cost} | {tokens_str} tokens{tasks_str}"
        )
        global_line = (
            f"[b]Global[/b] {self.global_status} {self.global_balance}"
            f" | 24h: {self.global_spend_24h} | 7d: {self.global_spend_7d}"
        )
        return local_line + "\n\n" + global_line

    def update_global(self, balance: float, spend_24h: float, spend_7d: float):
        """Show fresh backend balance figures and mark global data OK."""
        self.global_balance = "${:.2f}".format(balance)
        self.global_spend_24h = "${:.2f}".format(spend_24h)
        self.global_spend_7d = "${:.2f}".format(spend_7d)
        self.global_status = "✅"

    def update_local(self, traces: int, cost: float, tokens: int, tasks: list[tuple[str, int]] | None = None):
        """Show fresh local database statistics and mark local data OK."""
        self.local_traces = traces
        self.local_cost = "${:.4f}".format(cost)
        self.local_tokens = tokens
        self.local_tasks = tasks if tasks else []
        self.local_status = "✅"

    def set_global_loading(self):
        """Show loading state for global data."""
        self.global_balance = "..."
        self.global_spend_24h = "..."
        self.global_spend_7d = "..."
        self.global_status = "⏳"

    def set_local_loading(self):
        """Show loading state for local data."""
        self.local_traces = 0
        self.local_cost = "..."
        self.local_tokens = 0
        self.local_tasks = []
        self.local_status = "⏳"

    def set_global_error(self, error: str):
        """Show error state for global data (message itself is not displayed)."""
        self.global_balance = "Error"
        self.global_spend_24h = "-"
        self.global_spend_7d = "-"
        self.global_status = "❌"

    def set_local_error(self, error: str):
        """Show error state for local data (message itself is not displayed)."""
        self.local_traces = 0
        self.local_cost = "Error"
        self.local_tokens = 0
        self.local_tasks = []
        self.local_status = "❌"

    def set_global_unavailable(self):
        """Mark global data as unavailable (no API key)."""
        self.global_balance = "N/A"
        self.global_spend_24h = "N/A"
        self.global_spend_7d = "N/A"
        self.global_status = "⚪"
|
|
339
|
+
|
|
340
|
+
|
|
341
|
+
class ActiveRunsTable(DataTable):
    """Display currently active/running sessions."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # One dict per displayed run, in display order.
        self.active_runs: list[dict] = []

    def setup_table(self):
        """Initialize table columns."""
        self.add_columns("Session", "Experiment", "Started", "Duration", "Events", "Status")

    async def refresh_data(self, db_manager: NativeLibsqlTraceManager | None) -> None:
        """Refresh active runs data from database.

        "Active" means sessions created within the last 5 minutes. Each row
        shows elapsed time and a traffic-light status derived from the most
        recent event. With no manager the table is simply emptied; query
        errors are logged and leave the table cleared.
        """
        if not db_manager:
            # Database not available, clear the table
            self.active_runs.clear()
            self.clear()
            return

        try:
            # Get active sessions (those with recent activity in last 5 minutes)
            # NOTE(review): cutoff_time is a *naive local* datetime compared
            # against st.created_at in SQL — confirm stored timestamps use the
            # same timezone/naiveness, else the window is skewed.
            cutoff_time = datetime.now() - timedelta(minutes=5)

            df = await db_manager.query_traces("""
                WITH recent_sessions AS (
                    SELECT
                        st.session_id,
                        st.experiment_id,
                        st.created_at,
                        e.name as experiment_name,
                        COUNT(ev.id) as event_count,
                        MAX(ev.created_at) as last_event_time
                    FROM session_traces st
                    LEFT JOIN experiments e ON st.experiment_id = e.experiment_id
                    LEFT JOIN events ev ON st.session_id = ev.session_id
                    WHERE st.created_at >= :cutoff_time
                    GROUP BY st.session_id, st.experiment_id, st.created_at, e.name
                )
                SELECT
                    session_id,
                    experiment_id,
                    experiment_name,
                    created_at,
                    event_count,
                    last_event_time
                FROM recent_sessions
                ORDER BY last_event_time DESC
            """, {"cutoff_time": cutoff_time})

            self.active_runs.clear()
            self.clear()

            if not df.empty:
                for _, row in df.iterrows():
                    session_id = str(row["session_id"])
                    experiment_name = row["experiment_name"] or "Unknown"

                    # Parse datetime strings. dateutil (third-party) is
                    # imported lazily here so the dashboard still loads
                    # when it is absent and timestamps are already datetimes.
                    try:
                        if isinstance(row["created_at"], str):
                            from dateutil import parser as date_parser
                            started_at = date_parser.parse(row["created_at"])
                        else:
                            started_at = row["created_at"]

                        if isinstance(row["last_event_time"], str):
                            from dateutil import parser as date_parser
                            last_event_time = date_parser.parse(row["last_event_time"])
                        else:
                            last_event_time = row["last_event_time"]
                    except Exception as e:
                        # Skip rows whose timestamps cannot be parsed.
                        logging.error(f"Failed to parse datetime: {e}")
                        continue

                    # NOTE(review): datetime.now() is naive; if parsed values
                    # carry tzinfo this subtraction raises TypeError (caught by
                    # the outer except, aborting the whole refresh) — verify.
                    duration = datetime.now() - started_at

                    # Format duration
                    if duration.total_seconds() < 3600:  # Less than 1 hour
                        duration_str = f"{int(duration.total_seconds() // 60)}m"
                    else:
                        hours = int(duration.total_seconds() // 3600)
                        minutes = int((duration.total_seconds() % 3600) // 60)
                        duration_str = f"{hours}h {minutes}m"

                    # Status based on recent activity
                    time_since_last = datetime.now() - last_event_time
                    if time_since_last.total_seconds() < 60:  # Active in last minute
                        status = "🟢 Active"
                    elif time_since_last.total_seconds() < 300:  # Active in last 5 minutes
                        status = "🟡 Recent"
                    else:
                        status = "🟠 Idle"

                    run_info = {
                        "session_id": session_id,
                        "experiment_name": experiment_name,
                        "started_at": started_at,
                        "duration": duration_str,
                        "events": int(row["event_count"]),
                        "status": status
                    }
                    self.active_runs.append(run_info)
                    # Full session id as the row key; only 8 chars displayed.
                    self.add_row(
                        session_id[:8],  # Shortened session ID
                        experiment_name[:20],  # Truncated name
                        started_at.strftime("%H:%M:%S"),
                        duration_str,
                        str(run_info["events"]),
                        status,
                        key=session_id
                    )

        except Exception as e:
            logging.error(f"Failed to refresh active runs: {e}")
|
|
455
|
+
|
|
456
|
+
|
|
457
|
+
def find_databases() -> list[tuple[str, str]]:
    """Find all available databases in common locations.

    Scans ``traces/v3``, ``traces`` and the current directory (in that
    priority order) for ``*.db`` files. The first database found is labelled
    "(default)". If nothing is found, a single hard-coded default entry is
    returned so callers always get at least one candidate.

    Returns:
        List of (name, path) tuples
    """
    from pathlib import Path

    databases: list[tuple[str, str]] = []
    # Resolved paths already recorded — "." overlaps "traces"/"traces/v3",
    # so the same file can be discovered from multiple search roots.
    seen: set[str] = set()
    search_paths = [
        "traces/v3",
        "traces",
        ".",
    ]

    for search_path in search_paths:
        try:
            search_dir = Path(search_path)
            if not search_dir.exists():
                continue

            # Find all .db files
            for db_file in search_dir.glob("**/*.db"):
                if not db_file.is_file():
                    continue

                resolved = db_file.resolve()
                if str(resolved) in seen:
                    continue  # BUG FIX: skip duplicates from overlapping roots
                seen.add(str(resolved))

                # BUG FIX: db_file is usually relative (glob on a relative
                # root), so Path.relative_to(Path.cwd()) raised ValueError and
                # the broad except silently skipped the entire search path —
                # discovery almost always fell through to the default entry.
                try:
                    rel_path = str(resolved.relative_to(Path.cwd()))
                except ValueError:
                    rel_path = str(db_file)

                # Create a friendly name from the filename without .db
                name = db_file.stem
                if len(databases) == 0:
                    name = f"{name} (default)"
                databases.append((name, rel_path))
        except Exception as e:
            logging.debug(f"Error scanning {search_path}: {e}")

    # If no databases found, return default
    if not databases:
        databases.append(("synth_ai (default)", "traces/v3/synth_ai.db"))

    return databases
|
|
495
|
+
|
|
496
|
+
|
|
497
|
+
class SynthDashboard(App if _TEXTUAL_AVAILABLE else object):
|
|
498
|
+
"""Main Synth AI TUI Dashboard application."""
|
|
499
|
+
|
|
500
|
+
CSS = """
|
|
501
|
+
Screen {
|
|
502
|
+
layout: grid;
|
|
503
|
+
grid-columns: 1fr 1fr 1fr;
|
|
504
|
+
grid-rows: auto 1fr 1fr auto;
|
|
505
|
+
grid-gutter: 1;
|
|
506
|
+
}
|
|
507
|
+
|
|
508
|
+
#header {
|
|
509
|
+
column-span: 3;
|
|
510
|
+
height: 3;
|
|
511
|
+
}
|
|
512
|
+
|
|
513
|
+
#experiments-table {
|
|
514
|
+
row-span: 2;
|
|
515
|
+
}
|
|
516
|
+
|
|
517
|
+
#active-runs-panel {
|
|
518
|
+
column-span: 1;
|
|
519
|
+
}
|
|
520
|
+
|
|
521
|
+
#balance-status {
|
|
522
|
+
column-span: 1;
|
|
523
|
+
}
|
|
524
|
+
|
|
525
|
+
#experiment-detail {
|
|
526
|
+
column-span: 2;
|
|
527
|
+
height: 1fr;
|
|
528
|
+
}
|
|
529
|
+
|
|
530
|
+
#status-bar {
|
|
531
|
+
column-span: 3;
|
|
532
|
+
height: 3;
|
|
533
|
+
}
|
|
534
|
+
|
|
535
|
+
ExperimentTable {
|
|
536
|
+
height: 100%;
|
|
537
|
+
}
|
|
538
|
+
|
|
539
|
+
ActiveRunsTable {
|
|
540
|
+
height: 100%;
|
|
541
|
+
}
|
|
542
|
+
|
|
543
|
+
ExperimentDetail {
|
|
544
|
+
border: solid $primary;
|
|
545
|
+
padding: 1;
|
|
546
|
+
height: 100%;
|
|
547
|
+
}
|
|
548
|
+
|
|
549
|
+
BalanceStatus {
|
|
550
|
+
border: solid $primary;
|
|
551
|
+
padding: 1;
|
|
552
|
+
height: 100%;
|
|
553
|
+
}
|
|
554
|
+
|
|
555
|
+
DatabaseStatus {
|
|
556
|
+
height: 1;
|
|
557
|
+
padding: 0 1;
|
|
558
|
+
}
|
|
559
|
+
|
|
560
|
+
.section-title {
|
|
561
|
+
text-style: bold;
|
|
562
|
+
height: 1;
|
|
563
|
+
}
|
|
564
|
+
"""
|
|
565
|
+
|
|
566
|
+
BINDINGS = [
|
|
567
|
+
Binding("q", "quit", "Quit"),
|
|
568
|
+
Binding("r", "refresh", "Refresh"),
|
|
569
|
+
Binding("n", "next_db", "Next DB"),
|
|
570
|
+
Binding("p", "prev_db", "Prev DB"),
|
|
571
|
+
Binding("d", "toggle_debug", "Debug"),
|
|
572
|
+
("ctrl+c", "quit", "Quit"),
|
|
573
|
+
]
|
|
574
|
+
|
|
575
|
+
def __init__(self, db_url: str = "sqlite+aiosqlite:///traces/v3/synth_ai.db"):
|
|
576
|
+
super().__init__()
|
|
577
|
+
self.db_url = db_url
|
|
578
|
+
self.db_manager: NativeLibsqlTraceManager | None = None
|
|
579
|
+
self.refresh_timer: Timer | None = None
|
|
580
|
+
|
|
581
|
+
# Database discovery and selection
|
|
582
|
+
self.available_dbs: list[tuple[str, str]] = find_databases()
|
|
583
|
+
self.current_db_index: int = 0
|
|
584
|
+
|
|
585
|
+
# Log discovered databases
|
|
586
|
+
logging.info(f"Found {len(self.available_dbs)} database(s):")
|
|
587
|
+
for idx, (name, path) in enumerate(self.available_dbs):
|
|
588
|
+
logging.info(f" [{idx+1}] {name}: {path}")
|
|
589
|
+
|
|
590
|
+
# Try to find the initial db_url in available_dbs
|
|
591
|
+
for idx, (name, path) in enumerate(self.available_dbs):
|
|
592
|
+
if path in db_url or db_url.endswith(path):
|
|
593
|
+
self.current_db_index = idx
|
|
594
|
+
logging.info(f"Using database: {name} ({path})")
|
|
595
|
+
break
|
|
596
|
+
|
|
597
|
+
def compose(self) -> ComposeResult:
|
|
598
|
+
"""Create the UI layout."""
|
|
599
|
+
yield Header(show_clock=True)
|
|
600
|
+
|
|
601
|
+
with Container(id="experiments-table"):
|
|
602
|
+
yield Static("🧪 Experiments", classes="section-title")
|
|
603
|
+
yield ExperimentTable(id="experiments")
|
|
604
|
+
|
|
605
|
+
with Container(id="active-runs-panel"):
|
|
606
|
+
yield Static("⚡ Active Runs", classes="section-title")
|
|
607
|
+
yield ActiveRunsTable(id="active-runs")
|
|
608
|
+
|
|
609
|
+
with Container(id="balance-status"):
|
|
610
|
+
yield Static("💰 Balance & Stats", classes="section-title")
|
|
611
|
+
yield BalanceStatus(id="balance")
|
|
612
|
+
|
|
613
|
+
with Container(id="experiment-detail"):
|
|
614
|
+
yield Static("📋 Details", classes="section-title")
|
|
615
|
+
yield ExperimentDetail(id="detail")
|
|
616
|
+
|
|
617
|
+
with Container(id="status-bar"):
|
|
618
|
+
yield DatabaseStatus(id="db-status")
|
|
619
|
+
yield Footer()
|
|
620
|
+
|
|
621
|
+
async def on_mount(self) -> None:
|
|
622
|
+
"""Initialize the app when mounted."""
|
|
623
|
+
# Setup database connection - make it optional
|
|
624
|
+
await self._connect_to_database()
|
|
625
|
+
|
|
626
|
+
# Setup tables
|
|
627
|
+
exp_table = self.query_one("#experiments", ExperimentTable)
|
|
628
|
+
exp_table.setup_table()
|
|
629
|
+
|
|
630
|
+
active_runs_table = self.query_one("#active-runs", ActiveRunsTable)
|
|
631
|
+
active_runs_table.setup_table()
|
|
632
|
+
|
|
633
|
+
# Set balance loading state
|
|
634
|
+
balance_widget = self.query_one("#balance", BalanceStatus)
|
|
635
|
+
balance_widget.set_global_loading()
|
|
636
|
+
balance_widget.set_local_loading()
|
|
637
|
+
|
|
638
|
+
# Initial data load
|
|
639
|
+
await self.action_refresh()
|
|
640
|
+
|
|
641
|
+
# Start auto-refresh timer (every 5 seconds)
|
|
642
|
+
self.refresh_timer = self.set_interval(5.0, self._auto_refresh_data)
|
|
643
|
+
|
|
644
|
+
async def _auto_refresh_data(self) -> None:
|
|
645
|
+
"""Auto-refresh data periodically."""
|
|
646
|
+
exp_table = self.query_one("#experiments", ExperimentTable)
|
|
647
|
+
active_runs_table = self.query_one("#active-runs", ActiveRunsTable)
|
|
648
|
+
balance_widget = self.query_one("#balance", BalanceStatus)
|
|
649
|
+
|
|
650
|
+
if self.db_manager:
|
|
651
|
+
await exp_table.refresh_data(self.db_manager)
|
|
652
|
+
await active_runs_table.refresh_data(self.db_manager)
|
|
653
|
+
await self._refresh_local_stats(balance_widget)
|
|
654
|
+
|
|
655
|
+
# Always try to refresh global balance (independent of local DB)
|
|
656
|
+
await self._refresh_global_balance(balance_widget)
|
|
657
|
+
|
|
658
|
+
async def action_refresh(self) -> None:
|
|
659
|
+
"""Manual refresh action."""
|
|
660
|
+
exp_table = self.query_one("#experiments", ExperimentTable)
|
|
661
|
+
active_runs_table = self.query_one("#active-runs", ActiveRunsTable)
|
|
662
|
+
balance_widget = self.query_one("#balance", BalanceStatus)
|
|
663
|
+
|
|
664
|
+
balance_widget.set_global_loading()
|
|
665
|
+
balance_widget.set_local_loading()
|
|
666
|
+
|
|
667
|
+
if self.db_manager:
|
|
668
|
+
await exp_table.refresh_data(self.db_manager)
|
|
669
|
+
await active_runs_table.refresh_data(self.db_manager)
|
|
670
|
+
await self._refresh_local_stats(balance_widget)
|
|
671
|
+
|
|
672
|
+
# Always try to refresh global balance (independent of local DB)
|
|
673
|
+
await self._refresh_global_balance(balance_widget)
|
|
674
|
+
|
|
675
|
+
async def _refresh_local_stats(self, balance_widget: BalanceStatus) -> None:
|
|
676
|
+
"""Refresh local database statistics."""
|
|
677
|
+
if not self.db_manager:
|
|
678
|
+
logging.warning("No database manager available for local stats")
|
|
679
|
+
balance_widget.set_local_error("No database")
|
|
680
|
+
return
|
|
681
|
+
|
|
682
|
+
try:
|
|
683
|
+
logging.info("Fetching local stats from database...")
|
|
684
|
+
# Query local trace statistics
|
|
685
|
+
df = await self.db_manager.query_traces("""
|
|
686
|
+
SELECT
|
|
687
|
+
COUNT(DISTINCT st.session_id) as num_traces,
|
|
688
|
+
SUM(CASE WHEN ev.event_type = 'cais' THEN ev.cost_usd ELSE 0 END) / 100.0 as total_cost,
|
|
689
|
+
SUM(CASE WHEN ev.event_type = 'cais' THEN ev.total_tokens ELSE 0 END) as total_tokens
|
|
690
|
+
FROM session_traces st
|
|
691
|
+
LEFT JOIN events ev ON st.session_id = ev.session_id
|
|
692
|
+
""")
|
|
693
|
+
|
|
694
|
+
# Query task/environment breakdown from metadata
|
|
695
|
+
task_df = await self.db_manager.query_traces("""
|
|
696
|
+
SELECT
|
|
697
|
+
json_extract(metadata, '$.env_name') as task_name,
|
|
698
|
+
COUNT(DISTINCT session_id) as trace_count
|
|
699
|
+
FROM session_traces
|
|
700
|
+
WHERE json_extract(metadata, '$.env_name') IS NOT NULL
|
|
701
|
+
GROUP BY task_name
|
|
702
|
+
ORDER BY trace_count DESC
|
|
703
|
+
LIMIT 10
|
|
704
|
+
""")
|
|
705
|
+
|
|
706
|
+
if not df.empty:
|
|
707
|
+
row = df.iloc[0]
|
|
708
|
+
num_traces = int(row["num_traces"] or 0)
|
|
709
|
+
total_cost = float(row["total_cost"] or 0.0)
|
|
710
|
+
total_tokens = int(row["total_tokens"] or 0)
|
|
711
|
+
|
|
712
|
+
# Parse task data
|
|
713
|
+
tasks = []
|
|
714
|
+
if not task_df.empty:
|
|
715
|
+
for _, task_row in task_df.iterrows():
|
|
716
|
+
task_name = task_row["task_name"]
|
|
717
|
+
count = int(task_row["trace_count"])
|
|
718
|
+
if task_name:
|
|
719
|
+
tasks.append((str(task_name), count))
|
|
720
|
+
|
|
721
|
+
logging.info(f"Local stats: {num_traces} traces, ${total_cost:.4f}, {total_tokens} tokens, {len(tasks)} tasks")
|
|
722
|
+
balance_widget.update_local(num_traces, total_cost, total_tokens, tasks)
|
|
723
|
+
else:
|
|
724
|
+
logging.warning("Query returned empty dataframe")
|
|
725
|
+
balance_widget.update_local(0, 0.0, 0, [])
|
|
726
|
+
|
|
727
|
+
except Exception as e:
|
|
728
|
+
logging.error(f"Failed to refresh local stats: {e}", exc_info=True)
|
|
729
|
+
balance_widget.set_local_error(str(e)[:20])
|
|
730
|
+
|
|
731
|
+
async def _refresh_global_balance(self, balance_widget: BalanceStatus) -> None:
|
|
732
|
+
"""Refresh balance information from backend API."""
|
|
733
|
+
try:
|
|
734
|
+
# Try to get balance from environment or API
|
|
735
|
+
api_key = os.getenv("SYNTH_API_KEY") or os.getenv("SYNTH_BACKEND_API_KEY")
|
|
736
|
+
if not api_key:
|
|
737
|
+
balance_widget.set_global_unavailable()
|
|
738
|
+
return
|
|
739
|
+
|
|
740
|
+
# Try to get backend URL from environment
|
|
741
|
+
backend_url = os.getenv("SYNTH_BACKEND_BASE_URL") or "https://agent-learning.onrender.com/api/v1"
|
|
742
|
+
|
|
743
|
+
# Fetch balance
|
|
744
|
+
response = requests.get(
|
|
745
|
+
f"{backend_url}/balance/current",
|
|
746
|
+
headers={"Authorization": f"Bearer {api_key}"},
|
|
747
|
+
timeout=5
|
|
748
|
+
)
|
|
749
|
+
response.raise_for_status()
|
|
750
|
+
data = response.json()
|
|
751
|
+
|
|
752
|
+
balance = float(data.get("balance_dollars", 0.0))
|
|
753
|
+
|
|
754
|
+
# Try to get usage data
|
|
755
|
+
try:
|
|
756
|
+
usage_response = requests.get(
|
|
757
|
+
f"{backend_url}/balance/usage/windows",
|
|
758
|
+
params={"hours": "24,168"},
|
|
759
|
+
headers={"Authorization": f"Bearer {api_key}"},
|
|
760
|
+
timeout=5
|
|
761
|
+
)
|
|
762
|
+
if usage_response.ok:
|
|
763
|
+
usage_data = usage_response.json()
|
|
764
|
+
windows = {
|
|
765
|
+
int(r.get("window_hours")): r
|
|
766
|
+
for r in usage_data.get("windows", [])
|
|
767
|
+
if isinstance(r.get("window_hours"), int)
|
|
768
|
+
}
|
|
769
|
+
|
|
770
|
+
spend_24h = 0.0
|
|
771
|
+
spend_7d = 0.0
|
|
772
|
+
|
|
773
|
+
if 24 in windows:
|
|
774
|
+
spend_24h = float(windows[24].get("total_spend_cents", 0)) / 100.0
|
|
775
|
+
if 168 in windows:
|
|
776
|
+
spend_7d = float(windows[168].get("total_spend_cents", 0)) / 100.0
|
|
777
|
+
|
|
778
|
+
balance_widget.update_global(balance, spend_24h, spend_7d)
|
|
779
|
+
else:
|
|
780
|
+
# Fallback to just balance
|
|
781
|
+
balance_widget.update_global(balance, 0.0, 0.0)
|
|
782
|
+
except Exception:
|
|
783
|
+
# Fallback to just balance
|
|
784
|
+
balance_widget.update_global(balance, 0.0, 0.0)
|
|
785
|
+
|
|
786
|
+
except Exception as e:
|
|
787
|
+
# Only show error if it's not just "endpoint not available"
|
|
788
|
+
error_msg = str(e)
|
|
789
|
+
if "500" in error_msg or "Internal Server Error" in error_msg:
|
|
790
|
+
# Backend endpoint not implemented yet
|
|
791
|
+
balance_widget.set_global_unavailable()
|
|
792
|
+
else:
|
|
793
|
+
balance_widget.set_global_error(error_msg[:30])
|
|
794
|
+
|
|
795
|
+
async def action_quit(self) -> None:
|
|
796
|
+
"""Quit the application."""
|
|
797
|
+
if self.refresh_timer:
|
|
798
|
+
self.refresh_timer.stop()
|
|
799
|
+
if self.db_manager:
|
|
800
|
+
await self.db_manager.close()
|
|
801
|
+
self.exit()
|
|
802
|
+
|
|
803
|
+
    async def _connect_to_database(self) -> None:
        """Connect to the database selected by ``self.current_db_index``.

        Closes any previous connection, builds a sqlite+aiosqlite URL from
        the selected path, initializes a ``NativeLibsqlTraceManager``, and
        updates the status + balance widgets accordingly. All failures are
        reported through the widgets rather than raised.
        """
        db_status = self.query_one("#db-status", DatabaseStatus)
        balance_widget = self.query_one("#balance", BalanceStatus)

        try:
            # Close existing connection if any
            if self.db_manager:
                await self.db_manager.close()
                self.db_manager = None

            # Get current database info: (display name, filesystem path).
            db_name, db_path = self.available_dbs[self.current_db_index]
            self.db_url = f"sqlite+aiosqlite:///{db_path}"

            logging.info(f"Connecting to database: {db_name} ({db_path})")

            # NOTE(review): a constructor call always yields a truthy object,
            # so the `else` branch below looks unreachable unless
            # NativeLibsqlTraceManager is actually a factory that can return
            # None — confirm before simplifying.
            self.db_manager = NativeLibsqlTraceManager(self.db_url)
            if self.db_manager:
                await self.db_manager.initialize()
                db_status.set_connected(self.db_url, db_name)
                # Show "index/total" in the selector UI.
                db_status.set_db_selector(self.current_db_index, len(self.available_dbs))

                # Immediately refresh local stats after connecting
                logging.info("Refreshing local stats after connection...")
                await self._refresh_local_stats(balance_widget)
            else:
                db_status.set_disconnected("Database manager not available")
                balance_widget.set_local_error("No manager")
        except (ImportError, ModuleNotFoundError):
            # Database dependencies not available — presumably the manager
            # imports libsql lazily; verify where the import happens.
            db_status.set_disconnected("Database dependencies missing (libsql)")
            self.db_manager = None
            balance_widget.set_local_error("No libsql")
        except Exception as e:
            logging.error(f"Failed to connect to database: {e}", exc_info=True)
            db_status.set_disconnected(str(e))
            self.db_manager = None
            # Widget space is tight — truncate the message.
            balance_widget.set_local_error(str(e)[:15])
|
|
842
|
+
|
|
843
|
+
async def action_next_db(self) -> None:
|
|
844
|
+
"""Switch to next database."""
|
|
845
|
+
if len(self.available_dbs) <= 1:
|
|
846
|
+
return
|
|
847
|
+
|
|
848
|
+
self.current_db_index = (self.current_db_index + 1) % len(self.available_dbs)
|
|
849
|
+
await self._connect_to_database()
|
|
850
|
+
await self.action_refresh()
|
|
851
|
+
|
|
852
|
+
async def action_prev_db(self) -> None:
|
|
853
|
+
"""Switch to previous database."""
|
|
854
|
+
if len(self.available_dbs) <= 1:
|
|
855
|
+
return
|
|
856
|
+
|
|
857
|
+
self.current_db_index = (self.current_db_index - 1) % len(self.available_dbs)
|
|
858
|
+
await self._connect_to_database()
|
|
859
|
+
await self.action_refresh()
|
|
860
|
+
|
|
861
|
+
    def action_toggle_debug(self) -> None:
        """Toggle debug mode.

        Currently a no-op placeholder kept so the key binding resolves; a
        future version could open a debug panel or flip the logging level.
        """
        # Could add debug panel or logging level toggle
        pass
|
|
865
|
+
|
|
866
|
+
@on(DataTable.RowHighlighted, "#experiments")
|
|
867
|
+
def on_experiment_selected(self, event: DataTable.RowHighlighted) -> None:
|
|
868
|
+
"""Handle experiment selection."""
|
|
869
|
+
exp_table = self.query_one("#experiments", ExperimentTable)
|
|
870
|
+
selected_exp = exp_table.get_selected_experiment()
|
|
871
|
+
|
|
872
|
+
detail_panel = self.query_one("#detail", ExperimentDetail)
|
|
873
|
+
detail_panel.update_experiment(selected_exp)
|
|
874
|
+
|
|
875
|
+
|
|
876
|
+
def main(argv: list[str] | None = None):
    """CLI entry point: parse arguments and launch the dashboard app."""
    # The TUI dependency is optional; bail out with guidance if missing.
    if not _TEXTUAL_AVAILABLE:
        print("❌ Textual library is not available. Please install it with: pip install textual")
        return

    import argparse
    import os

    cli = argparse.ArgumentParser(description="Synth AI Interactive Dashboard")
    cli.add_argument(
        "-u",
        "--url",
        default=os.getenv("TUI_DB_URL", "sqlite+aiosqlite:///traces/v3/synth_ai.db"),
        help="Database URL (default: traces/v3/synth_ai.db)",
    )
    cli.add_argument("--debug", action="store_true", default=bool(os.getenv("TUI_DEBUG")), help="Enable debug logging")

    opts = cli.parse_args(argv)

    # Verbose logging is opt-in via flag or TUI_DEBUG env var.
    if opts.debug:
        logging.basicConfig(level=logging.DEBUG)

    # Launch the Textual application (blocks until quit).
    SynthDashboard(db_url=opts.url).run()
|
|
903
|
+
|
|
904
|
+
|
|
905
|
+
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
|