synth-ai 0.2.4.dev8__py3-none-any.whl → 0.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of synth-ai has been flagged as potentially problematic; review the changes below before upgrading.

Files changed (112)
  1. synth_ai/__init__.py +1 -1
  2. synth_ai/cli/__init__.py +6 -0
  3. synth_ai/cli/demo.py +68 -9
  4. synth_ai/cli/rl_demo.py +137 -0
  5. synth_ai/cli/root.py +65 -0
  6. synth_ai/demos/core/__init__.py +1 -0
  7. synth_ai/demos/core/cli.py +685 -0
  8. synth_ai/demos/demo_task_apps/__init__.py +1 -0
  9. synth_ai/demos/demo_task_apps/core.py +374 -0
  10. synth_ai/demos/demo_task_apps/math/__init__.py +1 -0
  11. synth_ai/demos/demo_task_apps/math/app.py +37 -0
  12. synth_ai/demos/demo_task_apps/math/config.toml +44 -0
  13. synth_ai/demos/demo_task_apps/math/deploy_modal.py +60 -0
  14. synth_ai/demos/demo_task_apps/math/deploy_task_app.sh +22 -0
  15. synth_ai/environments/examples/bandit/__init__.py +33 -0
  16. synth_ai/environments/examples/bandit/engine.py +294 -0
  17. synth_ai/environments/examples/bandit/environment.py +194 -0
  18. synth_ai/environments/examples/bandit/taskset.py +200 -0
  19. synth_ai/environments/examples/crafter_classic/agent_demos/analyze_semantic_words_markdown.py +250 -0
  20. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_comprehensive_evaluation.py +59 -0
  21. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_evaluation_browser.py +152 -0
  22. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_evaluation_config.toml +24 -0
  23. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_evaluation_framework.py +1194 -0
  24. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/crafter_synth_config.toml +56 -0
  25. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/filter_config_modal.toml +32 -0
  26. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/filter_traces_sft_turso.py +724 -0
  27. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/kick_off_ft_modal.py +384 -0
  28. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/analyze_action_results.py +53 -0
  29. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/analyze_agent_actions.py +178 -0
  30. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/analyze_latest_run.py +222 -0
  31. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/analyze_lm_traces.py +183 -0
  32. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/analyze_no_rewards.py +210 -0
  33. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/analyze_trace_issue.py +206 -0
  34. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/check_db_schema.py +49 -0
  35. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/check_latest_results.py +64 -0
  36. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/debug_agent_responses.py +88 -0
  37. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/quick_trace_check.py +77 -0
  38. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/compare_experiments.py +324 -0
  39. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/filter_traces_sft_turso.py +580 -0
  40. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/kick_off_ft_oai.py +362 -0
  41. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/multi_model_config.toml +49 -0
  42. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/analyze_enhanced_hooks.py +332 -0
  43. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/analyze_hook_events.py +97 -0
  44. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/analyze_hook_results.py +217 -0
  45. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/check_hook_storage.py +87 -0
  46. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/check_seeds.py +88 -0
  47. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/compare_seed_performance.py +195 -0
  48. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/custom_eval_pipelines.py +400 -0
  49. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/plot_hook_frequency.py +195 -0
  50. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/seed_analysis_summary.py +56 -0
  51. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/run_rollouts_for_models_and_compare_v3.py +858 -0
  52. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_quick_evaluation.py +52 -0
  53. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_react_agent.py +874 -0
  54. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_trace_evaluation.py +1412 -0
  55. synth_ai/environments/examples/crafter_classic/agent_demos/example_v3_usage.py +216 -0
  56. synth_ai/environments/examples/crafter_classic/agent_demos/old/compare_traces.py +296 -0
  57. synth_ai/environments/examples/crafter_classic/agent_demos/old/crafter_comprehensive_evaluation.py +58 -0
  58. synth_ai/environments/examples/crafter_classic/agent_demos/old/crafter_env_serialization.py +464 -0
  59. synth_ai/environments/examples/crafter_classic/agent_demos/old/crafter_evaluation_browser.py +152 -0
  60. synth_ai/environments/examples/crafter_classic/agent_demos/old/crafter_quick_evaluation.py +51 -0
  61. synth_ai/environments/examples/crafter_classic/agent_demos/old/crafter_trace_evaluation.py +1412 -0
  62. synth_ai/environments/examples/crafter_classic/agent_demos/old/debug_player_loss.py +112 -0
  63. synth_ai/environments/examples/crafter_classic/agent_demos/old/diagnose_service.py +203 -0
  64. synth_ai/environments/examples/crafter_classic/agent_demos/old/diagnose_slowness.py +305 -0
  65. synth_ai/environments/examples/crafter_classic/agent_demos/old/eval_by_difficulty.py +126 -0
  66. synth_ai/environments/examples/crafter_classic/agent_demos/old/eval_example.py +94 -0
  67. synth_ai/environments/examples/crafter_classic/agent_demos/old/explore_saved_states.py +142 -0
  68. synth_ai/environments/examples/crafter_classic/agent_demos/old/filter_traces_sft.py +26 -0
  69. synth_ai/environments/examples/crafter_classic/agent_demos/old/filter_traces_sft_OLD.py +984 -0
  70. synth_ai/environments/examples/crafter_classic/agent_demos/old/generate_ft_data_gemini.py +724 -0
  71. synth_ai/environments/examples/crafter_classic/agent_demos/old/generate_ft_data_modal.py +386 -0
  72. synth_ai/environments/examples/crafter_classic/agent_demos/old/generate_ft_metadata.py +205 -0
  73. synth_ai/environments/examples/crafter_classic/agent_demos/old/kick_off_ft_gemini.py +150 -0
  74. synth_ai/environments/examples/crafter_classic/agent_demos/old/kick_off_ft_modal.py +283 -0
  75. synth_ai/environments/examples/crafter_classic/agent_demos/old/prepare_vertex_ft.py +280 -0
  76. synth_ai/environments/examples/crafter_classic/agent_demos/old/profile_env_slowness.py +456 -0
  77. synth_ai/environments/examples/crafter_classic/agent_demos/old/replicate_issue.py +166 -0
  78. synth_ai/environments/examples/crafter_classic/agent_demos/old/run_and_eval.py +102 -0
  79. synth_ai/environments/examples/crafter_classic/agent_demos/old/run_comparison.py +128 -0
  80. synth_ai/environments/examples/crafter_classic/agent_demos/old/run_qwen_rollouts.py +655 -0
  81. synth_ai/environments/examples/crafter_classic/agent_demos/old/trace_eval_OLD.py +202 -0
  82. synth_ai/environments/examples/crafter_classic/agent_demos/old/validate_openai_format.py +166 -0
  83. synth_ai/environments/examples/crafter_classic/environment.py +41 -2
  84. synth_ai/environments/examples/crafter_custom/agent_demos/__init__.py +1 -0
  85. synth_ai/environments/examples/crafter_custom/agent_demos/trace_eval.py +202 -0
  86. synth_ai/environments/examples/crafter_custom/old/analyze_diamond_issue.py +159 -0
  87. synth_ai/environments/examples/crafter_custom/old/analyze_diamond_spawning.py +158 -0
  88. synth_ai/environments/examples/crafter_custom/old/compare_worlds.py +71 -0
  89. synth_ai/environments/examples/crafter_custom/old/dataset_stats.py +105 -0
  90. synth_ai/environments/examples/crafter_custom/old/diamond_spawning_summary.py +119 -0
  91. synth_ai/environments/examples/crafter_custom/old/example_dataset_usage.py +52 -0
  92. synth_ai/environments/examples/enron/units/keyword_stats.py +112 -0
  93. synth_ai/environments/examples/minigrid/agent_demos/minigrid_evaluation_framework.py +1188 -0
  94. synth_ai/environments/examples/minigrid/agent_demos/minigrid_quick_evaluation.py +48 -0
  95. synth_ai/environments/examples/minigrid/agent_demos/minigrid_react_agent.py +562 -0
  96. synth_ai/environments/examples/minigrid/agent_demos/minigrid_trace_evaluation.py +221 -0
  97. synth_ai/environments/examples/nethack/agent_demos/nethack_evaluation_framework.py +981 -0
  98. synth_ai/environments/examples/nethack/agent_demos/nethack_quick_evaluation.py +74 -0
  99. synth_ai/environments/examples/nethack/agent_demos/nethack_react_agent.py +831 -0
  100. synth_ai/environments/examples/red/agent_demos/__init__.py +1 -0
  101. synth_ai/environments/examples/red/units/__init__.py +1 -0
  102. synth_ai/environments/examples/sokoban/agent_demos/sokoban_full_eval.py +899 -0
  103. synth_ai/environments/examples/sokoban/units/astar_common.py +95 -0
  104. synth_ai/environments/service/app.py +8 -0
  105. synth_ai/install_sqld.sh +40 -0
  106. synth_ai-0.2.5.dist-info/METADATA +106 -0
  107. {synth_ai-0.2.4.dev8.dist-info → synth_ai-0.2.5.dist-info}/RECORD +111 -12
  108. {synth_ai-0.2.4.dev8.dist-info → synth_ai-0.2.5.dist-info}/entry_points.txt +1 -0
  109. synth_ai-0.2.4.dev8.dist-info/METADATA +0 -635
  110. {synth_ai-0.2.4.dev8.dist-info → synth_ai-0.2.5.dist-info}/WHEEL +0 -0
  111. {synth_ai-0.2.4.dev8.dist-info → synth_ai-0.2.5.dist-info}/licenses/LICENSE +0 -0
  112. {synth_ai-0.2.4.dev8.dist-info → synth_ai-0.2.5.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,724 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Filter traces from Turso/SQLite (v3) to create Modal/Synth SFT-ready .jsonl files
4
+ Supports two modes:
5
+ 1. Trajectory-level filtering: Include entire trajectories above a score threshold
6
+ 2. Window-based filtering: Extract high-scoring windows of actions
7
+
8
+ This is the v3 version using the new async Turso-based tracing system.
9
+ """
10
+
11
+ import json
12
+ import argparse
13
+ import asyncio
14
+ from pathlib import Path
15
+ from typing import List, Dict, Any, Tuple, Optional
16
+ from collections import defaultdict
17
+ import numpy as np
18
+ import os
19
+ import sys
20
+ import toml
21
+ import pandas as pd
22
+
23
+ # Add synth_ai to path
24
+ sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent.parent.parent))
25
+
26
+ from synth_ai.tracing_v3 import SessionTracer
27
+ from synth_ai.tracing_v3.turso.manager import AsyncSQLTraceManager
28
+ from synth_ai.tracing_v3.abstractions import LMCAISEvent, EnvironmentEvent, RuntimeEvent
29
+
30
+
31
+ def create_histogram(data: List[float], bins: int = 20, width: int = 60, height: int = 15,
32
+ title: str = "", x_label: str = "", y_label: str = "") -> str:
33
+ """Create a beautiful ASCII histogram."""
34
+ if not data:
35
+ return "No data to display"
36
+
37
+ # Create histogram
38
+ counts, edges = np.histogram(data, bins=bins)
39
+ max_count = max(counts) if len(counts) > 0 else 1
40
+
41
+ # Normalize heights
42
+ if max_count > 0:
43
+ heights = [int(c * height / max_count) for c in counts]
44
+ else:
45
+ heights = [0] * len(counts)
46
+
47
+ # Build the plot
48
+ lines = []
49
+
50
+ # Title
51
+ if title:
52
+ lines.append(f"\n{title.center(width + 10)}")
53
+ lines.append("=" * (width + 10))
54
+
55
+ # Y-axis label
56
+ if y_label:
57
+ lines.append(f"{y_label}")
58
+
59
+ # Plot area with y-axis
60
+ for y in range(height, 0, -1):
61
+ # Y-axis value
62
+ y_val = int(max_count * y / height)
63
+ line = f"{y_val:>6} │"
64
+
65
+ # Bars
66
+ for h in heights:
67
+ if h >= y:
68
+ line += "ā–ˆ"
69
+ else:
70
+ line += " "
71
+
72
+ lines.append(line)
73
+
74
+ # X-axis
75
+ lines.append(f"{'':>6} ā””" + "─" * len(heights))
76
+
77
+ # X-axis labels
78
+ x_labels_line = " " * 8
79
+ min_val, max_val = min(data), max(data)
80
+
81
+ # Add labels at key positions
82
+ label_positions = [0, len(heights)//4, len(heights)//2, 3*len(heights)//4, len(heights)-1]
83
+ for i, pos in enumerate(label_positions):
84
+ if pos < len(edges) - 1:
85
+ val = edges[pos]
86
+ label = f"{val:.1f}"
87
+ # Calculate position
88
+ target_pos = 8 + pos
89
+ if i == 0:
90
+ x_labels_line = label + x_labels_line[len(label):]
91
+ elif i == len(label_positions) - 1:
92
+ start = max(0, target_pos - len(label))
93
+ x_labels_line = x_labels_line[:start] + label
94
+ else:
95
+ start = max(0, target_pos - len(label)//2)
96
+ end = min(len(x_labels_line), start + len(label))
97
+ if start < len(x_labels_line):
98
+ x_labels_line = x_labels_line[:start] + label[:end-start] + x_labels_line[end:]
99
+
100
+ lines.append(x_labels_line)
101
+
102
+ # X-axis label
103
+ if x_label:
104
+ lines.append(f"\n{x_label.center(width + 10)}")
105
+
106
+ return "\n".join(lines)
107
+
108
+
109
def create_bar_chart(categories: List[str], values: List[int], width: int = 60,
                    title: str = "", show_values: bool = True) -> str:
    """Render a horizontal ASCII bar chart.

    Each category gets one row; bar lengths are scaled so the largest
    value spans ``width`` characters. Returns the chart as a single
    newline-joined string, or ``"No data to display"`` for empty input.
    """
    if not categories or not values:
        return "No data to display"

    peak = max(values) if values else 1
    out: List[str] = []

    # Optional heading with a separator rule.
    if title:
        out.append(f"\n{title}")
        out.append("=" * (width + 20))

    # Pad every category name to the longest one so bars line up.
    label_width = max(len(name) for name in categories)

    for name, amount in zip(categories, values):
        # Scale this bar relative to the largest value.
        filled = int(amount * width / peak) if peak > 0 else 0
        bar = "ā–ˆ" * filled
        if show_values:
            out.append(f"{name:<{label_width}} │ {bar} {amount}")
        else:
            out.append(f"{name:<{label_width}} │ {bar}")

    return "\n".join(out)
141
+
142
+
143
class FinetuningDataExtractorV3:
    """Extract fine-tuning data from v3 Turso traces.

    Wraps an ``AsyncSQLTraceManager`` and provides async helpers to list
    sessions, compute per-session metrics, filter sessions, and convert
    stored message traces into OpenAI-style SFT examples.

    Use as an async context manager so the underlying DB connection is
    opened and closed deterministically.
    """

    def __init__(self, db_url: str):
        # Connection is created lazily; ``initialize()`` runs in __aenter__.
        self.db_manager = AsyncSQLTraceManager(db_url)
        self._initialized = False

    async def __aenter__(self):
        await self.db_manager.initialize()
        self._initialized = True
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.db_manager.close()

    async def get_all_sessions(self) -> pd.DataFrame:
        """Return all session IDs (newest first) as a DataFrame."""
        query = """
            SELECT DISTINCT session_id, created_at
            FROM session_traces
            ORDER BY created_at DESC
        """
        return await self.db_manager.query_traces(query)

    async def get_session_metrics(self, session_id: str) -> Dict[str, Any]:
        """Return total reward, token count, and cost for one session."""
        # Get total reward from environment events.
        # Now that rewards are properly saved in the DB, we can use them directly.
        reward_query = """
            SELECT COALESCE(SUM(reward), 0) as total_reward
            FROM events
            WHERE session_id = :session_id
            AND event_type = 'environment'
            AND reward IS NOT NULL
        """
        reward_df = await self.db_manager.query_traces(reward_query, {"session_id": session_id})
        total_reward = float(reward_df['total_reward'].iloc[0]) if not reward_df.empty else 0.0

        # Get total tokens and cost from LM ('cais') events.
        # NOTE(review): cost_usd appears to be stored in hundredths (the
        # / 100.0 converts to dollars) — confirm against the trace writer.
        lm_query = """
            SELECT
                COALESCE(SUM(total_tokens), 0) as total_tokens,
                COALESCE(SUM(cost_usd) / 100.0, 0) as total_cost
            FROM events
            WHERE session_id = :session_id
            AND event_type = 'cais'
        """
        lm_df = await self.db_manager.query_traces(lm_query, {"session_id": session_id})

        total_tokens = int(lm_df['total_tokens'].iloc[0]) if not lm_df.empty else 0
        total_cost = float(lm_df['total_cost'].iloc[0]) if not lm_df.empty else 0.0

        return {
            'session_id': session_id,
            'total_reward': total_reward,
            'total_tokens': total_tokens,
            'total_cost': total_cost
        }

    async def get_session_achievements(self, session_id: str) -> List[str]:
        """Get list of achievements unlocked in a session.

        Aggregates across ALL environment events with a non-null
        system_state_after, unioning any flags that were ever true. This is
        more robust than inspecting only the last event, which can miss
        transient unlocks.
        """
        query = """
            SELECT system_state_after
            FROM events
            WHERE session_id = :session_id
            AND event_type = 'environment'
            AND system_state_after IS NOT NULL
            ORDER BY id ASC
        """
        df = await self.db_manager.query_traces(query, {"session_id": session_id})

        if df.empty:
            return []

        unlocked: Dict[str, bool] = {}
        for _, row in df.iterrows():
            try:
                state_after = row['system_state_after']
                if not state_after:
                    continue
                # States may be stored as JSON strings or as dicts.
                if isinstance(state_after, str):
                    state_after = json.loads(state_after)
                if not isinstance(state_after, dict):
                    continue
                public_state = state_after.get('public_state')
                if not isinstance(public_state, dict):
                    continue
                ach = public_state.get('achievements_status')
                if not isinstance(ach, dict):
                    continue
                # Union: once a flag is seen true, it stays unlocked.
                for name, flag in ach.items():
                    if flag:
                        unlocked[name] = True
            except Exception as e:
                # Best-effort: a malformed row should not abort the scan.
                print(f"Error parsing achievements row: {e}")
                continue

        return [k for k, v in unlocked.items() if v]

    async def filter_by_achievements(self, min_achievements: int) -> List[str]:
        """Get sessions with at least min_achievements unlocked."""
        all_sessions = await self.get_all_sessions()
        qualifying_sessions = []

        for _, row in all_sessions.iterrows():
            session_id = row['session_id']
            achievements = await self.get_session_achievements(session_id)
            if len(achievements) >= min_achievements:
                qualifying_sessions.append(session_id)

        return qualifying_sessions

    async def extract_openai_format(self, session_ids: List[str], min_reward: float = 0.0) -> List[Dict[str, Any]]:
        """Extract training data in OpenAI chat format from the given sessions.

        One example per session: the full ordered conversation plus a
        metadata block carrying the session's total reward.

        NOTE: ``min_reward`` is accepted for backward compatibility but is
        not applied here — callers are expected to pre-filter session_ids
        (see ``filter_traces_from_turso``).
        """
        training_data = []

        for session_id in session_ids:
            # Get messages directly from the messages table, ordered by
            # turn number when available, else by message time.
            messages_query = """
                SELECT m.message_type, m.content, m.message_time, st.turn_number
                FROM messages m
                LEFT JOIN session_timesteps st ON m.timestep_id = st.id
                WHERE m.session_id = :session_id
                ORDER BY COALESCE(st.turn_number, m.message_time), m.id
            """

            messages_df = await self.db_manager.query_traces(messages_query, {"session_id": session_id})

            if len(messages_df) == 0:
                continue

            # Build conversation history.
            messages = []
            system_message = None

            for _, row in messages_df.iterrows():
                msg_type = row['message_type']
                content = row['content']

                # Unwrap JSON envelopes (origin_system_id format) down to
                # their payload; non-JSON content passes through untouched.
                try:
                    content_data = json.loads(content)
                    if isinstance(content_data, dict) and 'payload' in content_data:
                        content = content_data['payload']
                except (TypeError, ValueError):
                    # Not a JSON string — keep raw content.
                    pass

                if msg_type == 'system' and system_message is None:
                    # Keep only the first system message.
                    if isinstance(content, str):
                        system_message = content

                elif msg_type == 'user':
                    if isinstance(content, dict):
                        # Convert observation dict to formatted string.
                        content = self._format_observation_content(content)
                    messages.append({"role": "user", "content": str(content)})

                elif msg_type == 'assistant':
                    messages.append({"role": "assistant", "content": str(content)})

            # Add system message at the beginning if found.
            if system_message:
                messages.insert(0, {"role": "system", "content": system_message})

            # Only include if we have a complete conversation.
            if len(messages) > 1:
                # Get total reward for this session.
                reward_query = """
                    SELECT COALESCE(SUM(reward), 0) as total_reward
                    FROM events
                    WHERE session_id = :session_id
                    AND event_type = 'environment'
                    AND reward IS NOT NULL
                """
                reward_df = await self.db_manager.query_traces(reward_query, {"session_id": session_id})
                total_reward = reward_df.iloc[0]['total_reward'] if len(reward_df) > 0 else 0

                training_data.append({
                    "messages": messages,
                    "metadata": {
                        "session_id": session_id,
                        "total_reward": float(total_reward)  # Convert to float for JSON serialization
                    }
                })

        return training_data

    async def extract_openai_window_format(self, session_ids: List[str]) -> List[Dict[str, Any]]:
        """Extract per-turn user→assistant pairs (window mode) for SFT.

        Emits one example per turn, pairing the last user message in the
        turn with the first assistant message (grouping is driven by
        session_timesteps.turn_number; rows without a turn are skipped).
        """
        window_data: List[Dict[str, Any]] = []

        for session_id in session_ids:
            messages_query = """
                SELECT st.turn_number, m.message_type, m.content, m.id AS message_id
                FROM messages m
                LEFT JOIN session_timesteps st ON m.timestep_id = st.id
                WHERE m.session_id = :session_id
                ORDER BY COALESCE(st.turn_number, m.message_time), m.id
            """
            df = await self.db_manager.query_traces(messages_query, {"session_id": session_id})
            if df is None or df.empty:
                continue

            # Unwrap JSON-envelope payloads before grouping by turn.
            parsed_rows: List[Dict[str, Any]] = []
            for _, row in df.iterrows():
                msg_type = row.get('message_type')
                content = row.get('content')
                try:
                    content_data = json.loads(content)
                    if isinstance(content_data, dict) and 'payload' in content_data:
                        content = content_data['payload']
                except Exception:
                    pass
                parsed_rows.append({
                    'turn_number': row.get('turn_number'),
                    'message_type': msg_type,
                    'content': content,
                })

            # Group messages by turn_number (defaultdict is module-level).
            turn_to_msgs: Dict[int, List[Dict[str, Any]]] = defaultdict(list)
            for r in parsed_rows:
                tn = r.get('turn_number')
                if tn is None:
                    # Skip rows that aren't associated with a turn
                    continue
                turn_to_msgs[int(tn)].append(r)

            # For each turn, find user -> assistant pair(s).
            for tn in sorted(turn_to_msgs.keys()):
                msgs = turn_to_msgs[tn]
                # Last user before (or among) the messages; first assistant.
                user_content: Optional[str] = None
                assistant_content: Optional[str] = None
                for r in msgs:
                    if r['message_type'] == 'user':
                        user_content = r['content']
                    elif r['message_type'] == 'assistant' and assistant_content is None:
                        assistant_content = r['content']
                if user_content and assistant_content:
                    window_data.append({
                        'messages': [
                            {'role': 'user', 'content': str(user_content)},
                            {'role': 'assistant', 'content': str(assistant_content)},
                        ],
                        'metadata': {
                            'session_id': session_id,
                            'turn_number': tn,
                        }
                    })

        return window_data

    def _format_observation_content(self, obs: Dict[str, Any]) -> str:
        """Format observation dict into a readable string."""
        if not isinstance(obs, dict):
            return str(obs)

        # Extract key fields for a concise representation; other keys are
        # intentionally dropped.
        parts = []

        if 'inventory' in obs:
            inv = obs['inventory']
            inv_str = ", ".join([f"{k}: {v}" for k, v in inv.items() if v > 0])
            if inv_str:
                parts.append(f"Inventory: {inv_str}")

        if 'achievements_status' in obs:
            achievements = [k for k, v in obs['achievements_status'].items() if v]
            if achievements:
                parts.append(f"Achievements: {', '.join(achievements)}")

        if 'health' in obs:
            parts.append(f"Health: {obs.get('health', 0)}")

        return "; ".join(parts) if parts else "Empty observation"
433
+
434
+
435
async def filter_traces_from_turso(
    db_url: str,
    output_path: str,
    config: Dict[str, Any]
) -> Tuple[int, Dict[str, Any]]:
    """
    Filter traces from a Turso/SQLite v3 database and write SFT JSONL.

    Args:
        db_url: Async SQLAlchemy-style database URL.
        output_path: Destination .jsonl file (parent dirs are created).
        config: Dict with "mode" ("trajectory" or "window") and a
            "filters" sub-dict (min_total_reward, min_achievements,
            max_cost, max_tokens, models).

    Returns:
        Tuple of (num_examples, statistics_dict)
    """
    mode = config.get("mode", "trajectory")
    filters = config.get("filters", {})

    # Extract filtering parameters (missing keys fall back to "no limit").
    min_reward = filters.get("min_total_reward", 0.0)
    min_achievements = filters.get("min_achievements", 0)
    max_cost = filters.get("max_cost", float('inf'))
    max_tokens = filters.get("max_tokens", float('inf'))

    # Modal/Synth specific: filter by model if specified.
    target_models = filters.get("models", [])

    statistics = {
        "total_sessions": 0,
        "filtered_sessions": 0,
        "total_examples": 0,
        "reward_distribution": [],
        "token_distribution": [],
        "cost_distribution": [],
        "model_distribution": defaultdict(int)
    }

    async with FinetuningDataExtractorV3(db_url) as extractor:
        # Get all sessions.
        all_sessions = await extractor.get_all_sessions()
        statistics["total_sessions"] = len(all_sessions)

        # Filter sessions based on criteria.
        filtered_sessions = []

        for _, row in all_sessions.iterrows():
            session_id = row['session_id']
            metrics = await extractor.get_session_metrics(session_id)

            # Apply scalar filters.
            if metrics['total_reward'] < min_reward:
                continue
            if metrics['total_cost'] > max_cost:
                continue
            if metrics['total_tokens'] > max_tokens:
                continue

            # Check achievements if required.
            if min_achievements > 0:
                achievements = await extractor.get_session_achievements(session_id)
                if len(achievements) < min_achievements:
                    continue

            # Check model filter if specified.
            if target_models:
                model_query = """
                    SELECT DISTINCT model_name
                    FROM events
                    WHERE session_id = :session_id
                    AND event_type = 'cais'
                    AND model_name IS NOT NULL
                """
                model_df = await extractor.db_manager.query_traces(
                    model_query, {"session_id": session_id}
                )
                session_models = model_df['model_name'].tolist() if not model_df.empty else []
                if not any(model in target_models for model in session_models):
                    continue

            filtered_sessions.append(session_id)

            # Collect statistics (only for sessions that passed the filters).
            statistics["reward_distribution"].append(metrics['total_reward'])
            statistics["token_distribution"].append(metrics['total_tokens'])
            statistics["cost_distribution"].append(metrics['total_cost'])

        statistics["filtered_sessions"] = len(filtered_sessions)

        # Extract training data.
        if mode == "trajectory":
            training_data = await extractor.extract_openai_format(
                session_ids=filtered_sessions,
                min_reward=min_reward
            )
        else:  # window mode
            # FIX: previously this branch silently fell back to trajectory
            # extraction; use the dedicated per-turn window extractor.
            training_data = await extractor.extract_openai_window_format(filtered_sessions)

        statistics["total_examples"] = len(training_data)

        # Write one JSON object per line (JSONL).
        output_file = Path(output_path)
        # parents=True so nested output dirs (e.g. ft_data/run1/) work.
        output_file.parent.mkdir(parents=True, exist_ok=True)

        with open(output_file, 'w') as f:
            for example in training_data:
                f.write(json.dumps(example) + '\n')

        # Get model distribution (database-wide, not per-session).
        model_query = """
            SELECT model_name, COUNT(*) as count
            FROM events
            WHERE event_type = 'cais'
            AND model_name IS NOT NULL
            GROUP BY model_name
        """
        model_stats = await extractor.db_manager.query_traces(model_query)
        for _, row in model_stats.iterrows():
            statistics["model_distribution"][row['model_name']] = int(row['count'])

    return len(training_data), statistics
556
+
557
+
558
def print_statistics(stats: Dict[str, Any]):
    """Pretty-print filtering statistics, including ASCII charts."""
    divider = "=" * 80
    print("\n" + divider)
    print("FILTERING STATISTICS (Modal/Synth - v3)")
    print(divider)

    # Headline counts.
    total = stats['total_sessions']
    kept = stats['filtered_sessions']
    print(f"\nTotal sessions in database: {total}")
    print(f"Sessions after filtering: {kept}")
    print(f"Training examples generated: {stats['total_examples']}")

    pass_rate = (kept / total * 100) if total > 0 else 0
    print(f"Filter pass rate: {pass_rate:.1f}%")

    # Reward distribution (NaN entries are dropped before charting).
    rewards = stats['reward_distribution']
    if rewards and any(not np.isnan(r) for r in rewards):
        finite_rewards = [r for r in rewards if not np.isnan(r)]
        if finite_rewards:
            print(create_histogram(
                finite_rewards,
                bins=20,
                title="Reward Distribution",
                x_label="Total Reward",
                y_label="Count"
            ))

            print("\nReward statistics:")
            print(f"  Min: {min(finite_rewards):.2f}")
            print(f"  Max: {max(finite_rewards):.2f}")
            print(f"  Mean: {np.mean(finite_rewards):.2f}")
            print(f"  Median: {np.median(finite_rewards):.2f}")
    else:
        print("\nNo valid reward data to display.")

    # Token distribution.
    tokens = stats['token_distribution']
    if tokens and any(not np.isnan(t) for t in tokens):
        finite_tokens = [t for t in tokens if not np.isnan(t)]
        if finite_tokens:
            print(create_histogram(
                finite_tokens,
                bins=20,
                title="Token Usage Distribution",
                x_label="Total Tokens",
                y_label="Count"
            ))

    # Model usage bar chart.
    if stats['model_distribution']:
        print(create_bar_chart(
            list(stats['model_distribution'].keys()),
            list(stats['model_distribution'].values()),
            title="Model Usage",
            show_values=True
        ))

    print("\n" + divider)
616
+
617
+
618
def main():
    """CLI entry point: parse args, layer config sources, run the filter.

    Config precedence (lowest to highest): built-in defaults, TOML file
    passed via -c/--config, individual command-line overrides.
    """
    parser = argparse.ArgumentParser(
        description="Filter traces from Turso/SQLite v3 for Modal/Synth fine-tuning",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Example usage:
  # Use default config
  python filter_traces_sft_turso.py -d sqlite:///traces.db -o ft_data/training.jsonl

  # Use custom config file
  python filter_traces_sft_turso.py -d sqlite:///traces.db -c filter_config.toml

  # Override config parameters
  python filter_traces_sft_turso.py -d sqlite:///traces.db --min-reward 5.0 --max-cost 0.1

  # Filter by model
  python filter_traces_sft_turso.py -d sqlite:///traces.db --models "Qwen/Qwen2.5-7B-Instruct"
"""
    )

    parser.add_argument('-d', '--database', required=True, help='Path to Turso/SQLite database or connection URL')
    parser.add_argument('-o', '--output', default='ft_data/training_modal.jsonl', help='Output JSONL file')
    parser.add_argument('-c', '--config', help='Configuration TOML file')

    # Filter overrides
    parser.add_argument('--mode', choices=['trajectory', 'window'], help='Filtering mode')
    parser.add_argument('--min-reward', type=float, help='Minimum total reward')
    parser.add_argument('--min-achievements', type=int, help='Minimum achievements')
    parser.add_argument('--max-cost', type=float, help='Maximum cost')
    parser.add_argument('--max-tokens', type=int, help='Maximum tokens')
    parser.add_argument('--models', nargs='+', help='Filter by model names (e.g., Qwen/Qwen2.5-7B-Instruct)')

    parser.add_argument('--dry-run', action='store_true', help='Show statistics without writing output')

    args = parser.parse_args()

    # Built-in defaults; user config and CLI flags layer on top.
    config = {
        "mode": "trajectory",
        "filters": {
            "min_total_reward": 1.0,
            "min_achievements": 0,
            "max_cost": 10.0,
            "max_tokens": 100000,
            "models": []  # Empty means all models
        }
    }

    if args.config:
        with open(args.config, 'r') as f:
            loaded_config = toml.load(f)
        # FIX: merge the nested "filters" table instead of replacing it
        # wholesale — a partial [filters] section in the TOML used to wipe
        # the remaining filter defaults.
        user_filters = loaded_config.pop("filters", {})
        config.update(loaded_config)
        config["filters"].update(user_filters)

    # Apply command-line overrides (highest precedence).
    if args.mode:
        config["mode"] = args.mode
    if args.min_reward is not None:
        config["filters"]["min_total_reward"] = args.min_reward
    if args.min_achievements is not None:
        config["filters"]["min_achievements"] = args.min_achievements
    if args.max_cost is not None:
        config["filters"]["max_cost"] = args.max_cost
    if args.max_tokens is not None:
        config["filters"]["max_tokens"] = args.max_tokens
    if args.models:
        config["filters"]["models"] = args.models

    # Convert database path to proper URL format if needed.
    db_url = args.database
    if db_url.startswith("sqlite:///"):
        # Already in URL format
        pass
    elif db_url.endswith(".db"):
        # Convert a bare file path to an async SQLAlchemy URL.
        db_url = f"sqlite+aiosqlite:///{db_url}"

    print("šŸ¤– Modal/Synth Fine-Tuning Data Filter (v3)")
    print(f"Using database: {db_url}")
    print(f"Output file: {args.output}")
    print(f"Mode: {config['mode']}")
    print(f"Filters: {json.dumps(config['filters'], indent=2)}")

    if args.dry_run:
        print("\nšŸ” DRY RUN - No output will be written")

    # Run filtering inside a local coroutine so asyncio.run owns the loop.
    async def run():
        num_examples, stats = await filter_traces_from_turso(
            db_url,
            args.output if not args.dry_run else "/dev/null",
            config
        )

        # Print statistics
        print_statistics(stats)

        if not args.dry_run:
            print(f"\nāœ… Successfully wrote {num_examples} training examples to {args.output}")
            print("   Ready for Modal/Synth fine-tuning!")
        else:
            print(f"\nāœ… Would write {num_examples} training examples (dry run)")

    asyncio.run(run())


if __name__ == "__main__":
    main()