synth-ai 0.2.16__py3-none-any.whl → 0.2.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release of synth-ai has been flagged as potentially problematic.

Files changed (192)
  1. examples/analyze_semantic_words.sh +2 -2
  2. examples/blog_posts/pokemon_vl/README.md +98 -0
  3. examples/blog_posts/pokemon_vl/configs/eval_qwen3_vl.toml +25 -0
  4. examples/blog_posts/pokemon_vl/configs/eval_rl_final.toml +24 -0
  5. examples/blog_posts/pokemon_vl/configs/filter_high_reward.toml +10 -0
  6. examples/blog_posts/pokemon_vl/configs/train_rl_from_sft.toml +42 -0
  7. examples/blog_posts/pokemon_vl/configs/train_sft_qwen4b_vl.toml +40 -0
  8. examples/blog_posts/warming_up_to_rl/README.md +158 -0
  9. examples/blog_posts/warming_up_to_rl/configs/eval_ft_qwen4b.toml +25 -0
  10. examples/blog_posts/warming_up_to_rl/configs/eval_groq_qwen32b.toml +25 -0
  11. examples/blog_posts/warming_up_to_rl/configs/eval_openai_gpt_oss_120b.toml +29 -0
  12. examples/blog_posts/warming_up_to_rl/configs/filter_high_reward_dataset.toml +10 -0
  13. examples/blog_posts/warming_up_to_rl/configs/train_rl_from_sft.toml +41 -0
  14. examples/blog_posts/warming_up_to_rl/configs/train_sft_qwen4b.toml +40 -0
  15. examples/dev/qwen3_32b_qlora_4xh100.toml +5 -0
  16. examples/multi_step/configs/crafter_rl_outcome.toml +1 -1
  17. examples/multi_step/configs/crafter_rl_stepwise_hosted_judge.toml +65 -107
  18. examples/multi_step/configs/crafter_rl_stepwise_shaped.toml +1 -1
  19. examples/multi_step/configs/crafter_rl_stepwise_simple.toml +1 -1
  20. examples/multi_step/configs/crafter_rl_stepwise_simple_NEW_FORMAT.toml +105 -0
  21. examples/multi_step/configs/verilog_rl_lora.toml +80 -123
  22. examples/qwen_coder/configs/coder_lora_30b.toml +1 -3
  23. examples/qwen_coder/configs/coder_lora_4b.toml +4 -1
  24. examples/qwen_coder/configs/coder_lora_small.toml +1 -3
  25. examples/qwen_vl/README.md +10 -12
  26. examples/qwen_vl/SETUP_COMPLETE.md +7 -8
  27. examples/qwen_vl/VISION_TESTS_COMPLETE.md +2 -3
  28. examples/qwen_vl/collect_data_via_cli.md +76 -84
  29. examples/qwen_vl/collect_vision_traces.py +4 -4
  30. examples/qwen_vl/configs/crafter_rl_vision_qwen3vl4b.toml +40 -57
  31. examples/qwen_vl/configs/crafter_vlm_sft_example.toml +1 -2
  32. examples/qwen_vl/configs/eval_gpt4o_mini_vision.toml +20 -37
  33. examples/qwen_vl/configs/eval_gpt5nano_vision.toml +21 -40
  34. examples/qwen_vl/configs/eval_qwen3vl_vision.toml +26 -0
  35. examples/qwen_vl/configs/{filter_qwen2vl_sft.toml → filter_qwen3vl_sft.toml} +4 -5
  36. examples/qwen_vl/configs/filter_vision_sft.toml +2 -3
  37. examples/qwen_vl/crafter_qwen_vl_agent.py +5 -5
  38. examples/qwen_vl/run_vision_comparison.sh +6 -7
  39. examples/rl/README.md +5 -5
  40. examples/rl/configs/rl_from_base_qwen.toml +26 -1
  41. examples/rl/configs/rl_from_base_qwen17.toml +5 -2
  42. examples/rl/task_app/README.md +1 -2
  43. examples/rl/task_app/math_single_step.py +2 -2
  44. examples/run_crafter_demo.sh +2 -2
  45. examples/sft/README.md +1 -1
  46. examples/sft/configs/crafter_fft_qwen0p6b.toml +4 -1
  47. examples/sft/configs/crafter_lora_qwen0p6b.toml +4 -1
  48. examples/swe/task_app/README.md +32 -2
  49. examples/swe/task_app/grpo_swe_mini.py +4 -0
  50. examples/swe/task_app/hosted/envs/crafter/react_agent.py +1 -1
  51. examples/swe/task_app/hosted/envs/mini_swe/environment.py +37 -10
  52. examples/swe/task_app/hosted/inference/openai_client.py +4 -4
  53. examples/swe/task_app/morph_backend.py +178 -0
  54. examples/task_apps/crafter/task_app/README.md +1 -1
  55. examples/task_apps/crafter/task_app/grpo_crafter.py +66 -3
  56. examples/task_apps/crafter/task_app/grpo_crafter_task_app.py +1 -1
  57. examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/policy.py +4 -26
  58. examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/react_agent.py +1 -2
  59. examples/task_apps/crafter/task_app/synth_envs_hosted/inference/openai_client.py +17 -49
  60. examples/task_apps/crafter/task_app/synth_envs_hosted/policy_routes.py +13 -5
  61. examples/task_apps/crafter/task_app/synth_envs_hosted/rollout.py +15 -1
  62. examples/task_apps/enron/task_app/grpo_enron_task_app.py +1 -1
  63. examples/task_apps/math/README.md +1 -2
  64. examples/task_apps/pokemon_red/README.md +3 -4
  65. examples/task_apps/pokemon_red/eval_image_only_gpt4o.toml +6 -5
  66. examples/task_apps/pokemon_red/eval_pokemon_red_policy.py +1 -2
  67. examples/task_apps/pokemon_red/task_app.py +36 -5
  68. examples/task_apps/sokoban/README.md +2 -3
  69. examples/task_apps/verilog/eval_groq_qwen32b.toml +12 -14
  70. examples/task_apps/verilog/task_app/grpo_verilog_task_app.py +1 -1
  71. examples/vlm/configs/crafter_vlm_gpt4o.toml +4 -1
  72. examples/warming_up_to_rl/configs/crafter_fft.toml +4 -1
  73. examples/warming_up_to_rl/configs/crafter_fft_4b.toml +0 -2
  74. examples/warming_up_to_rl/configs/rl_from_base_qwen4b.toml +2 -2
  75. examples/warming_up_to_rl/run_local_rollout_traced.py +1 -1
  76. examples/warming_up_to_rl/task_app/README.md +1 -1
  77. examples/warming_up_to_rl/task_app/grpo_crafter.py +134 -3
  78. examples/warming_up_to_rl/task_app/grpo_crafter_task_app.py +1 -1
  79. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/policy.py +3 -27
  80. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/react_agent.py +1 -1
  81. examples/warming_up_to_rl/task_app/synth_envs_hosted/inference/openai_client.py +4 -4
  82. examples/warming_up_to_rl/task_app/synth_envs_hosted/policy_routes.py +6 -3
  83. examples/workflows/math_rl/configs/rl_from_base_qwen.toml +27 -0
  84. examples/workflows/math_rl/configs/rl_from_base_qwen17.toml +5 -0
  85. synth_ai/api/train/builders.py +9 -3
  86. synth_ai/api/train/cli.py +125 -10
  87. synth_ai/api/train/configs/__init__.py +8 -1
  88. synth_ai/api/train/configs/rl.py +32 -7
  89. synth_ai/api/train/configs/sft.py +6 -2
  90. synth_ai/api/train/configs/shared.py +59 -2
  91. synth_ai/auth/credentials.py +119 -0
  92. synth_ai/cli/__init__.py +12 -4
  93. synth_ai/cli/commands/__init__.py +17 -0
  94. synth_ai/cli/commands/demo/__init__.py +6 -0
  95. synth_ai/cli/commands/demo/core.py +163 -0
  96. synth_ai/cli/commands/deploy/__init__.py +23 -0
  97. synth_ai/cli/commands/deploy/core.py +614 -0
  98. synth_ai/cli/commands/deploy/errors.py +72 -0
  99. synth_ai/cli/commands/deploy/validation.py +11 -0
  100. synth_ai/cli/commands/eval/__init__.py +19 -0
  101. synth_ai/cli/commands/eval/core.py +1109 -0
  102. synth_ai/cli/commands/eval/errors.py +81 -0
  103. synth_ai/cli/commands/eval/validation.py +133 -0
  104. synth_ai/cli/commands/filter/__init__.py +12 -0
  105. synth_ai/cli/commands/filter/core.py +388 -0
  106. synth_ai/cli/commands/filter/errors.py +55 -0
  107. synth_ai/cli/commands/filter/validation.py +77 -0
  108. synth_ai/cli/commands/help/__init__.py +177 -0
  109. synth_ai/cli/commands/help/core.py +73 -0
  110. synth_ai/cli/commands/status/__init__.py +64 -0
  111. synth_ai/cli/commands/status/client.py +192 -0
  112. synth_ai/cli/commands/status/config.py +92 -0
  113. synth_ai/cli/commands/status/errors.py +20 -0
  114. synth_ai/cli/commands/status/formatters.py +164 -0
  115. synth_ai/cli/commands/status/subcommands/__init__.py +9 -0
  116. synth_ai/cli/commands/status/subcommands/files.py +79 -0
  117. synth_ai/cli/commands/status/subcommands/jobs.py +334 -0
  118. synth_ai/cli/commands/status/subcommands/models.py +79 -0
  119. synth_ai/cli/commands/status/subcommands/runs.py +81 -0
  120. synth_ai/cli/commands/status/subcommands/summary.py +47 -0
  121. synth_ai/cli/commands/status/utils.py +114 -0
  122. synth_ai/cli/commands/train/__init__.py +53 -0
  123. synth_ai/cli/commands/train/core.py +21 -0
  124. synth_ai/cli/commands/train/errors.py +117 -0
  125. synth_ai/cli/commands/train/judge_schemas.py +199 -0
  126. synth_ai/cli/commands/train/judge_validation.py +304 -0
  127. synth_ai/cli/commands/train/validation.py +443 -0
  128. synth_ai/cli/demo.py +2 -162
  129. synth_ai/cli/deploy/__init__.py +28 -0
  130. synth_ai/cli/deploy/core.py +5 -0
  131. synth_ai/cli/deploy/errors.py +23 -0
  132. synth_ai/cli/deploy/validation.py +5 -0
  133. synth_ai/cli/eval/__init__.py +36 -0
  134. synth_ai/cli/eval/core.py +5 -0
  135. synth_ai/cli/eval/errors.py +31 -0
  136. synth_ai/cli/eval/validation.py +5 -0
  137. synth_ai/cli/filter/__init__.py +28 -0
  138. synth_ai/cli/filter/core.py +5 -0
  139. synth_ai/cli/filter/errors.py +23 -0
  140. synth_ai/cli/filter/validation.py +5 -0
  141. synth_ai/cli/modal_serve/__init__.py +12 -0
  142. synth_ai/cli/modal_serve/core.py +14 -0
  143. synth_ai/cli/modal_serve/errors.py +8 -0
  144. synth_ai/cli/modal_serve/validation.py +11 -0
  145. synth_ai/cli/serve/__init__.py +12 -0
  146. synth_ai/cli/serve/core.py +14 -0
  147. synth_ai/cli/serve/errors.py +8 -0
  148. synth_ai/cli/serve/validation.py +11 -0
  149. synth_ai/cli/setup.py +20 -265
  150. synth_ai/cli/status.py +7 -126
  151. synth_ai/cli/task_app_deploy.py +1 -10
  152. synth_ai/cli/task_app_modal_serve.py +4 -9
  153. synth_ai/cli/task_app_serve.py +4 -11
  154. synth_ai/cli/task_apps.py +58 -1487
  155. synth_ai/cli/train/__init__.py +12 -0
  156. synth_ai/cli/train/core.py +21 -0
  157. synth_ai/cli/train/errors.py +8 -0
  158. synth_ai/cli/train/validation.py +24 -0
  159. synth_ai/cli/train.py +1 -14
  160. synth_ai/demos/crafter/grpo_crafter_task_app.py +1 -1
  161. synth_ai/demos/demo_task_apps/crafter/grpo_crafter_task_app.py +1 -1
  162. synth_ai/environments/examples/red/engine.py +33 -12
  163. synth_ai/environments/examples/red/engine_helpers/reward_components.py +151 -179
  164. synth_ai/environments/examples/red/environment.py +26 -0
  165. synth_ai/environments/examples/red/trace_hooks_v3.py +168 -0
  166. synth_ai/http.py +12 -0
  167. synth_ai/judge_schemas.py +10 -11
  168. synth_ai/learning/rl/client.py +3 -1
  169. synth_ai/streaming/__init__.py +29 -0
  170. synth_ai/streaming/config.py +94 -0
  171. synth_ai/streaming/handlers.py +469 -0
  172. synth_ai/streaming/streamer.py +301 -0
  173. synth_ai/streaming/types.py +95 -0
  174. synth_ai/task/validators.py +2 -2
  175. synth_ai/tracing_v3/migration_helper.py +1 -2
  176. synth_ai/utils/env.py +25 -18
  177. synth_ai/utils/http.py +4 -1
  178. synth_ai/utils/modal.py +2 -2
  179. {synth_ai-0.2.16.dist-info → synth_ai-0.2.17.dist-info}/METADATA +8 -3
  180. {synth_ai-0.2.16.dist-info → synth_ai-0.2.17.dist-info}/RECORD +184 -109
  181. examples/qwen_vl/configs/eval_qwen2vl_vision.toml +0 -44
  182. synth_ai/cli/tui.py +0 -62
  183. synth_ai/tui/__init__.py +0 -5
  184. synth_ai/tui/__main__.py +0 -13
  185. synth_ai/tui/cli/__init__.py +0 -1
  186. synth_ai/tui/cli/query_experiments.py +0 -164
  187. synth_ai/tui/cli/query_experiments_v3.py +0 -164
  188. synth_ai/tui/dashboard.py +0 -911
  189. {synth_ai-0.2.16.dist-info → synth_ai-0.2.17.dist-info}/WHEEL +0 -0
  190. {synth_ai-0.2.16.dist-info → synth_ai-0.2.17.dist-info}/entry_points.txt +0 -0
  191. {synth_ai-0.2.16.dist-info → synth_ai-0.2.17.dist-info}/licenses/LICENSE +0 -0
  192. {synth_ai-0.2.16.dist-info → synth_ai-0.2.17.dist-info}/top_level.txt +0 -0
synth_ai/cli/commands/eval/core.py
@@ -0,0 +1,1109 @@
+ from __future__ import annotations
+
+ import asyncio
+ import contextlib
+ import importlib
+ import importlib.util
+ import json
+ import os
+ import sqlite3
+ import sys
+ import time
+ import uuid
+ from collections.abc import Sequence
+ from functools import lru_cache
+ from pathlib import Path
+ from typing import TYPE_CHECKING, Any, cast
+
+ import click
+ from synth_ai.task.config import EvalConfig
+ from synth_ai.utils.task_app_discovery import discover_eval_config_paths
+
+ from .errors import (
+     EvalCliError,
+     EvalConfigNotFoundError,
+     EvalConfigParseError,
+     InvalidEvalConfigError,
+     MetadataFilterFormatError,
+     MetadataSQLExecutionError,
+     MetadataSQLResultError,
+     MissingEvalTableError,
+     NoSeedsMatchedError,
+     SeedParseError,
+     TaskInfoUnavailableError,
+     TomlUnavailableError,
+ )
+ from .validation import validate_eval_options
+
+ try:  # Python 3.11+
+     import tomllib as _toml
+ except Exception:  # pragma: no cover - fallback
+     _toml = None  # type: ignore[assignment]
+
+ __all__ = ["command", "get_command", "format_eval_error"]
+
+ if TYPE_CHECKING:
+     from synth_ai.cli.task_apps import AppChoice, TaskAppEntryType
+
+
+ @lru_cache(maxsize=1)
+ def _task_apps_module():
+     from synth_ai.cli import task_apps as module  # local import to avoid circular deps
+
+     return module
+
+
+ @click.command(
+     "eval",
+     help="Run one-off rollouts against a task app and print judge/eval summaries.",
+ )
+ @click.argument("app_id", type=str, required=False)
+ @click.option(
+     "--config",
+     type=click.Path(),
+     default=None,
+     help="Path to eval TOML (short schema). Auto-discovers the first matching file when omitted.",
+ )
+ @click.option(
+     "--url",
+     "task_app_url",
+     type=str,
+     default=None,
+     help="Base URL of a running task app instead of spawning locally (requires --env-file for secrets).",
+ )
+ @click.option(
+     "--seeds",
+     default="0,1,2,3,4",
+     help="Comma-separated seeds/indices to evaluate. Use negative numbers to wrap around the dataset.",
+ )
+ @click.option("--split", default="train", show_default=True, help="Dataset split to use")
+ @click.option(
+     "--model",
+     default=None,
+     help="Model identifier. When omitted the CLI will prompt based on task metadata.",
+ )
+ @click.option(
+     "--env-file",
+     multiple=True,
+     type=click.Path(),
+     help="Env file(s) to load (API keys, etc.). Required when using --url or remote judges.",
+ )
+ @click.option(
+     "--trace-db",
+     default="traces/v3/synth_ai.db",
+     show_default=True,
+     help="SQLite/Turso URL for storing rollout traces; set to 'none' to disable persistence.",
+ )
+ @click.option(
+     "--metadata",
+     multiple=True,
+     help="Filter tasks by key=value metadata (e.g., --metadata difficulty=easy)",
+ )
+ @click.option(
+     "--metadata-sql",
+     default=None,
+     help="SQLite query that returns seeds to evaluate (e.g., SELECT seed FROM tasks WHERE difficulty='easy' LIMIT 5)",
+ )
+ def eval_command(
+     app_id: str | None,
+     config: str | None,
+     task_app_url: str | None,
+     seeds: str,
+     split: str,
+     model: str | None,
+     env_file: Sequence[str],
+     trace_db: str,
+     metadata: Sequence[str],
+     metadata_sql: str | None,
+ ) -> None:
+     try:
+         return _eval_command_impl(
+             app_id=app_id,
+             config=config,
+             task_app_url=task_app_url,
+             seeds=seeds,
+             split=split,
+             model=model,
+             env_file=env_file,
+             trace_db=trace_db,
+             metadata=metadata,
+             metadata_sql=metadata_sql,
+         )
+     except EvalCliError as exc:
+         raise click.ClickException(format_eval_error(exc)) from exc
+
+
+ def _eval_command_impl(
+     app_id: str | None,
+     config: str | None,
+     task_app_url: str | None,
+     seeds: str,
+     split: str,
+     model: str | None,
+     env_file: Sequence[str],
+     trace_db: str,
+     metadata: Sequence[str],
+     metadata_sql: str | None,
+ ) -> None:
+     """Run rollouts against a task app and report judge statistics.
+
+     By default the command spins up the selected task app in-process, executes the
+     requested seeds, and prints aggregate scores (official and custom judges). When
+     pointing at a remote `--url`, supply matching `--env-file` values so the CLI can
+     forward authentication headers to the running service.
+     """
+     module = _task_apps_module()
+     task_app_config_type = module.TaskAppConfig
+     create_task_app = module.create_task_app
+     select_app_choice = module._select_app_choice
+     determine_env_files = module._determine_env_files
+     load_env_files_into_process = module._load_env_files_into_process
+     store_trace = getattr(module, "_store_trace", None)
+     pearson = module._pearson
+     judge_spec_cls = module.JudgeSpec
+     session_tracer_cls = getattr(module, "SessionTracer", None)
+
+     # Parse and validate the TOML config.
+     cfg: dict[str, Any] = {}
+     eval_cfg: EvalConfig | None = None
+     config_path: Path | None = None
+
+     if config:
+         config_path = Path(config)
+     else:
+         auto_configs = discover_eval_config_paths()
+         if auto_configs:
+             config_path = auto_configs[0]
+             click.echo(f"Using eval config: {config_path}")
+
+     if config_path:
+         if _toml is None:
+             raise TomlUnavailableError()
+         if not config_path.exists():
+             raise EvalConfigNotFoundError(str(config_path))
+         try:
+             data = config_path.read_bytes()
+             parsed = _toml.loads(data.decode("utf-8"))
+             if isinstance(parsed, dict):
+                 section = parsed.get("eval")
+                 if section is None:
+                     cfg = dict(parsed)
+                 elif isinstance(section, dict):
+                     cfg = dict(section)
+                 else:
+                     raise MissingEvalTableError()
+         except EvalCliError:  # let MissingEvalTableError surface with its own message
+             raise
+         except Exception as exc:
+             raise EvalConfigParseError(path=str(config_path), detail=str(exc)) from exc
+
+     if cfg:
+         try:
+             normalized_cfg = validate_eval_options(cfg)
+             eval_cfg = EvalConfig.from_dict(dict(normalized_cfg))
+             cfg = dict(normalized_cfg)
+             click.echo(f"✓ Config validated: {len(eval_cfg.seeds)} seeds, model={eval_cfg.model}")
+         except (ValueError, TypeError) as validation_error:
+             raise InvalidEvalConfigError(detail=str(validation_error)) from validation_error
+     else:
+         cfg = {}
+
+     # CLI args override config.
+     if eval_cfg:
+         app_id = app_id or eval_cfg.app_id
+     else:
+         app_id = app_id or (cfg.get("app_id") if isinstance(cfg.get("app_id"), str) else None)  # type: ignore
+
+     metadata_filters: dict[str, str] = {}
+     if eval_cfg:
+         metadata_filters.update(eval_cfg.metadata)
+     else:
+         cfg_metadata = cfg.get("metadata")
+         if isinstance(cfg_metadata, dict):
+             for key, value in cfg_metadata.items():
+                 metadata_filters[str(key)] = str(value)
+         elif isinstance(cfg_metadata, list):
+             for item in cfg_metadata:
+                 if isinstance(item, str) and "=" in item:
+                     key, value = item.split("=", 1)
+                     metadata_filters[key.strip()] = value.strip()
+
+     for item in metadata or ():
+         if "=" not in item:
+             raise MetadataFilterFormatError(entry=item)
+         key, value = item.split("=", 1)
+         key = key.strip()
+         value = value.strip()
+         if not key or not value:
+             raise MetadataFilterFormatError(entry=item)
+         metadata_filters[key] = value
+
+     metadata_sql_query: str | None = None
+     if eval_cfg and eval_cfg.metadata_sql:
+         metadata_sql_query = eval_cfg.metadata_sql
+     else:
+         cfg_metadata_sql = cfg.get("metadata_sql")
+         if isinstance(cfg_metadata_sql, dict):
+             metadata_sql_query = cfg_metadata_sql.get("query") or cfg_metadata_sql.get("sql")
+         elif isinstance(cfg_metadata_sql, str):
+             metadata_sql_query = cfg_metadata_sql
+
+     if metadata_sql:
+         metadata_sql_query = metadata_sql
+     if metadata_sql_query is not None:
+         metadata_sql_query = str(metadata_sql_query)
+
+     trace_db_url: str | None = None
+     trace_db = (trace_db or "").strip()
+     if trace_db and trace_db.lower() not in {"none", "off", "disable"}:
+         if "://" in trace_db:
+             trace_db_url = trace_db
+         else:
+             trace_path = Path(trace_db).expanduser()
+             trace_path.parent.mkdir(parents=True, exist_ok=True)
+             trace_db_url = f"sqlite+aiosqlite:///{trace_path}"
+     trace_tracer = (
+         session_tracer_cls(db_url=trace_db_url, auto_save=True)
+         if trace_db_url and session_tracer_cls is not None
+         else None
+     )
+
+     # Determine selection params (CLI takes precedence; TOML only fills unset model/seeds/env).
+     if cfg.get("model") and not model:
+         model = str(cfg["model"])  # type: ignore[index]
+     if cfg.get("seeds") and seeds == "0,1,2,3,4":
+         val = cfg["seeds"]
+         if isinstance(val, list):
+             with contextlib.suppress(Exception):
+                 seeds = ",".join(str(int(x)) for x in val)
+         elif isinstance(val, str):
+             seeds = val
+         elif isinstance(val, int):
+             seeds = str(val)
+     if cfg.get("env_file") and not env_file:
+         ef = cfg["env_file"]
+         if isinstance(ef, str):
+             env_file = (ef,)  # type: ignore[assignment]
+         elif isinstance(ef, list):
+             env_file = tuple(str(x) for x in ef)  # type: ignore[assignment]
+
+     choice_for_env: AppChoice | None = None
+     entry: TaskAppEntryType | None = None
+     if task_app_url is None:
+         choice_for_env = select_app_choice(app_id, purpose="eval")
+         entry = choice_for_env.ensure_entry()
+
+     env_paths: list[Path] = []
+     if entry is not None:
+         original_env_path = choice_for_env.path if choice_for_env is not None else None
+         env_paths = determine_env_files(entry, env_file, original_path=original_env_path)
+     else:
+         if not env_file:
+             raise click.ClickException("--env-file is required when using --url")
+         for candidate in env_file:
+             p = Path(candidate).expanduser()
+             if not p.exists():
+                 raise click.ClickException(f"Env file not found: {p}")
+             env_paths.append(p)
+
+     click.echo("Using env file(s): " + ", ".join(str(p) for p in env_paths))
+     load_env_files_into_process([str(Path(p)) for p in env_paths])
+
+     if task_app_url is None:
+         config = entry.config_factory()  # type: ignore[union-attr]
+         # Help the type checker; the runtime check is also enforced in server.run_task_app.
+         if not isinstance(config, task_app_config_type):
+             raise click.ClickException(
+                 "Invalid task app: config_factory did not return TaskAppConfig"
+             )
+         app = create_task_app(config)
+
+     # Determine supported models.
+     inference_meta: dict[str, Any] = {}
+     supported: list[str] = []
+     seen_models: set[str] = set()
+
+     def _add_supported_model(candidate: Any) -> None:
+         if not candidate:
+             return
+         text = str(candidate).strip()
+         if not text or text in seen_models:
+             return
+         supported.append(text)
+         seen_models.add(text)
+
+     if task_app_url is None:
+         try:
+             if hasattr(config, "base_task_info") and config.base_task_info:
+                 inf_obj = getattr(config.base_task_info, "inference", None)
+                 if inf_obj is not None:
+                     if hasattr(inf_obj, "model_dump"):
+                         inference_meta = dict(inf_obj.model_dump(exclude_none=True))  # type: ignore[attr-defined]
+                     elif isinstance(inf_obj, dict):
+                         inference_meta = dict(inf_obj)
+         except Exception:
+             inference_meta = {}
+     else:
+         try:
+             import httpx as _hx
+
+             headers = {}
+             api_key = (os.environ.get("ENVIRONMENT_API_KEY") or "").strip()
+             if api_key:
+                 headers["X-API-Key"] = api_key
+             with _hx.Client(base_url=task_app_url, headers=headers, timeout=15.0) as c:
+                 info = c.get("/info").json()
+                 inf = info.get("inference") if isinstance(info, dict) else None
+                 if isinstance(inf, dict):
+                     inference_meta = dict(inf)
+         except Exception:
+             inference_meta = {}
+
+     default_model = inference_meta.get("model")
+     if isinstance(default_model, str):
+         _add_supported_model(default_model)
+
+     models_field = inference_meta.get("models")
+     if isinstance(models_field, list):
+         for candidate in models_field:
+             _add_supported_model(candidate)
+
+     supported_models = inference_meta.get("supported_models")
+     if isinstance(supported_models, list):
+         for candidate in supported_models:
+             _add_supported_model(candidate)
+
+     providers = inference_meta.get("providers")
+     if isinstance(providers, list):
+         if "openai" in providers:
+             _add_supported_model("gpt-5")
+         if "groq" in providers:
+             _add_supported_model("groq:llama-3.1-70b-versatile")
+
+     _add_supported_model("synth:qwen-0.6b")
+
+     selected_model = model
+     if not selected_model:
+         if not supported:
+             raise click.ClickException(
+                 "No supported models; supply --model or add base_task_info.inference.model"
+             )
+         click.echo("Select model to evaluate:")
+         for idx, m in enumerate(supported, start=1):
+             click.echo(f" {idx}) {m}")
+         choice_idx = click.prompt("Enter choice", type=click.IntRange(1, len(supported)))
+         selected_model = supported[choice_idx - 1]
+
+     try:
+         seed_values = [int(s.strip()) for s in seeds.split(",") if s.strip()]
+     except Exception as exc:
+         raise SeedParseError(value=seeds) from exc
+
+     import httpx
+
+     headers = {}
+     api_key = (os.environ.get("ENVIRONMENT_API_KEY") or "").strip()
+     if api_key:
+         headers["X-API-Key"] = api_key
+
+     # Precompute optional policy overrides from the TOML.
+     policy_overrides: dict[str, Any] = {}
+     try:
+         # Accept an [eval.policy] table or top-level keys for convenience.
+         if isinstance(cfg.get("policy"), dict):
+             policy_overrides.update(dict(cfg["policy"]))
+         # Back-compat: allow temperature/max_tokens at the top level.
+         for k in (
+             "temperature",
+             "max_tokens",
+             "reasoning_effort",
+             "system_hint",
+             "tool_choice",
+             "inference_url",
+         ):
+             if k in cfg and k not in policy_overrides:
+                 policy_overrides[k] = cfg.get(k)
+     except Exception:
+         policy_overrides = {}
+
+     raw_concurrency = cfg.get("concurrency")
+     try:
+         concurrency_limit = int(raw_concurrency) if raw_concurrency is not None else 1
+     except Exception:
+         concurrency_limit = 1
+     if concurrency_limit <= 0:
+         concurrency_limit = 1
+     concurrency_limit = min(concurrency_limit, max(1, len(seed_values)))
+
+     judge_specs: list[Any] = []
+
+     def _register_judge(name_hint: str | None, judge_cfg: dict[str, Any]) -> None:
+         if not judge_cfg:
+             return
+         judge_module = judge_cfg.get("module")
+         judge_path = judge_cfg.get("path")
+         judge_callable_name = judge_cfg.get("callable") or judge_cfg.get("function")
+         if judge_module and judge_path:
+             raise click.ClickException("Judge config cannot set both 'module' and 'path'")
+         if not judge_module and not judge_path:
+             raise click.ClickException("Judge config requires 'module' or 'path'")
+         try:
+             if judge_module:
+                 module = importlib.import_module(str(judge_module))
+             else:
+                 path = Path(str(judge_path)).expanduser()
+                 if not path.exists():
+                     raise click.ClickException(f"Judge module path not found: {path}")
+                 spec = importlib.util.spec_from_file_location(
+                     f"_eval_judge_{path.stem}", path
+                 )
+                 if not spec or not spec.loader:
+                     raise click.ClickException(f"Failed to load judge module from {path}")
+                 module = importlib.util.module_from_spec(spec)
+                 sys.modules[spec.name] = module
+                 spec.loader.exec_module(module)
+         except click.ClickException:
+             raise
+         except Exception as exc:
+             raise click.ClickException(f"Unable to load judge module: {exc}") from exc
+
+         if judge_callable_name:
+             try:
+                 judge_fn = getattr(module, str(judge_callable_name))
+             except AttributeError as exc:
+                 raise click.ClickException(
+                     f"Judge callable '{judge_callable_name}' not found in module"
+                 ) from exc
+         else:
+             if hasattr(module, "judge"):
+                 judge_fn = module.judge
+             else:
+                 raise click.ClickException("Judge module must expose 'judge' callable")
+
+         if not callable(judge_fn):
+             raise click.ClickException("Judge callable is not callable")
+
+         judge_kwargs = {
+             k: v
+             for k, v in judge_cfg.items()
+             if k not in {"module", "path", "callable", "function", "name"}
+         }
+         display_name = str(
+             judge_cfg.get("name")
+             or name_hint
+             or f"judge{len(judge_specs) + 1}"
+         )
+         judge_specs.append(judge_spec_cls(display_name, judge_fn, judge_kwargs))
+
+     raw_judge_cfg = cfg.get("judge")
+     if isinstance(raw_judge_cfg, dict) and raw_judge_cfg:
+         direct_keys = {"module", "path", "callable", "function", "name"}
+         has_direct_keys = any(key in raw_judge_cfg for key in direct_keys)
+         nested_candidates = [
+             (key, value)
+             for key, value in raw_judge_cfg.items()
+             if isinstance(value, dict)
+         ]
+         if has_direct_keys and not nested_candidates:
+             _register_judge(None, raw_judge_cfg)
+         else:
+             for sub_name, sub_cfg in nested_candidates:
+                 _register_judge(sub_name, sub_cfg)
+
+     raw_judges_list = cfg.get("judges")
+     if isinstance(raw_judges_list, list):
+         for _index, entry in enumerate(raw_judges_list, start=1):
+             if isinstance(entry, dict):
+                 _register_judge(entry.get("name") or f"judge{len(judge_specs) + 1}", entry)
+
+     records: list[dict[str, Any]] = []
+
+     successes = 0
+     failures = 0
+     # Aggregate outcome stats across successful seeds.
+     outcome_sum: float = 0.0
+     outcome_count: int = 0
+     outcome_correct: int = 0
+
+     def _build_task_rows(taskset: Any) -> dict[int, dict[str, Any]]:
+         rows: dict[int, dict[str, Any]] = {}
+         if not isinstance(taskset, dict):
+             return rows
+
+         scenario_ids = taskset.get("scenario_ids") or []
+         loop_ids = taskset.get("loop_ids") or []
+         thread_ids = taskset.get("thread_ids") or []
+         difficulty_map = taskset.get("difficulty_map") or {}
+
+         max_len = max(len(scenario_ids), len(loop_ids), len(thread_ids))
+         for seed in range(max_len):
+             scenario_id = scenario_ids[seed] if seed < len(scenario_ids) else None
+             loop_id = loop_ids[seed] if seed < len(loop_ids) else None
+             thread_id = thread_ids[seed] if seed < len(thread_ids) else None
+             difficulty = None
+             if isinstance(difficulty_map, dict):
+                 if scenario_id and scenario_id in difficulty_map:
+                     difficulty = difficulty_map.get(scenario_id)
+                 elif str(seed) in difficulty_map:
+                     difficulty = difficulty_map.get(str(seed))
+
+             rows[seed] = {
+                 "seed": seed,
+                 "scenario_id": scenario_id,
+                 "loop_id": loop_id,
+                 "thread_id": thread_id,
+                 "difficulty": difficulty,
+             }
+         return rows
+
+     def _apply_metadata_filters(
+         rows: dict[int, dict[str, Any]], seeds_list: list[int], filters: dict[str, str]
+     ) -> list[int]:
+         if not filters:
+             return seeds_list
+         filtered: list[int] = []
+         for seed in seeds_list:
+             row = rows.get(seed)
+             if not row:
+                 continue
+             include = True
+             for key, expected in filters.items():
+                 actual = row.get(key)
+                 if actual is None:
+                     include = False
+                     break
+                 if str(actual).lower() != expected.lower():
+                     include = False
+                     break
+             if include:
+                 filtered.append(seed)
+         return filtered
+
+     def _apply_metadata_sql(
+         rows: dict[int, dict[str, Any]], seeds_list: list[int], query: str
+     ) -> list[int]:
+         """Return the seeds that satisfy an arbitrary SQL query.
+
+         The query is executed against an in-memory SQLite table named `tasks`
+         with columns (seed INTEGER, scenario_id TEXT, loop_id TEXT, thread_id TEXT,
+         difficulty TEXT). Rows whose `seed` value (or first column, when no `seed`
+         column is selected) appears in the result set are retained.
+         """
+         if not query:
+             return seeds_list
+         conn = sqlite3.connect(":memory:")
+         try:
+             cur = conn.cursor()
+             cur.execute(
+                 "CREATE TABLE tasks (seed INTEGER, scenario_id TEXT, loop_id TEXT, thread_id TEXT, difficulty TEXT)"
+             )
+             insert_stmt = (
+                 "INSERT INTO tasks (seed, scenario_id, loop_id, thread_id, difficulty) VALUES (?,?,?,?,?)"
+             )
+             for seed in seeds_list:
+                 row = rows.get(seed, {})
+                 cur.execute(
+                     insert_stmt,
+                     [
+                         seed,
+                         row.get("scenario_id"),
+                         row.get("loop_id"),
+                         row.get("thread_id"),
+                         row.get("difficulty"),
+                     ],
+                 )
+
+             result = cur.execute(query)
+             fetched = result.fetchall()
+             if not fetched:
+                 return []
+             description = result.description or []
+             col_names = [col[0] for col in description]
+             seeds_out: list[int] = []
+             for entry in fetched:
+                 value = entry[col_names.index("seed")] if "seed" in col_names else entry[0]
+                 try:
+                     seeds_out.append(int(value))
+                 except Exception as exc:
+                     raise MetadataSQLResultError(
+                         query=query,
+                         detail="non-integer value returned",
+                     ) from exc
+             seeds_set = set(seeds_out)
+             return [seed for seed in seeds_list if seed in seeds_set]
+         except sqlite3.Error as exc:
+             raise MetadataSQLExecutionError(query=query, detail=str(exc)) from exc
+         finally:
+             conn.close()
+
+     async def _run_eval() -> None:
+         nonlocal successes, failures, outcome_sum, outcome_count, outcome_correct, records, seed_values
+
+         if trace_tracer is not None and trace_tracer.db is None:
+             await trace_tracer.initialize()
+
+         if task_app_url is None:
+             transport = httpx.ASGITransport(app=app)  # type: ignore[name-defined]
+             async_client = httpx.AsyncClient(
+                 transport=cast(Any, transport),
+                 base_url="http://eval.local",
+                 timeout=300.0,
+                 follow_redirects=True,
+                 headers=headers,
+             )
+         else:
+             async_client = httpx.AsyncClient(
+                 base_url=task_app_url,
+                 timeout=300.0,
+                 follow_redirects=True,
+                 headers=headers,
+             )
+
+         try:
+             taskset_payload: dict[str, Any] | None = None
+             try:
+                 task_info_response = await async_client.get("/task_info")
+             except Exception:
+                 task_info_response = None
+             if task_info_response is not None and task_info_response.status_code == 200:
+                 with contextlib.suppress(Exception):
+                     payload_json = task_info_response.json()
+                     if isinstance(payload_json, dict) and "taskset" in payload_json:
+                         taskset_payload = payload_json.get("taskset")
+                         if not isinstance(taskset_payload, dict):
+                             taskset_payload = None
+                     elif isinstance(payload_json, dict):
+                         taskset_payload = payload_json
+
+             available_seeds = list(seed_values)
+             if metadata_sql_query or metadata_filters:
+                 if not taskset_payload:
+                     raise TaskInfoUnavailableError()
+                 rows = _build_task_rows(taskset_payload)
+                 if metadata_sql_query:
+                     available_seeds = _apply_metadata_sql(rows, available_seeds, metadata_sql_query)
+                 if metadata_filters:
+                     available_seeds = _apply_metadata_filters(rows, available_seeds, metadata_filters)
+                 if not available_seeds:
+                     raise NoSeedsMatchedError()
+                 seed_values = available_seeds
+
+             semaphore = asyncio.Semaphore(concurrency_limit)
+
+             async def _run_seed(seed_val: int) -> None:
+                 nonlocal successes, failures, outcome_sum, outcome_count, outcome_correct, records
+                 # Read env_name and policy_name from the config if available.
+                 env_name = cfg.get("env_name") or (cfg.get("env", {}).get("env_name") if isinstance(cfg.get("env"), dict) else None)
+                 policy_name = cfg.get("policy_name") or (cfg.get("policy", {}).get("policy_name") if isinstance(cfg.get("policy"), dict) else None)
+                 env_config_overrides = cfg.get("env_config", {}) if isinstance(cfg.get("env_config"), dict) else {}
+                 policy_config_overrides = cfg.get("policy_config", {}) if isinstance(cfg.get("policy_config"), dict) else {}
+
+                 # Debug: print config parsing.
+                 if seed_val == 0:
+                     click.echo(f"[DEBUG] env_name from config: {env_name}")
+                     click.echo(f"[DEBUG] policy_name from config: {policy_name}")
+
+                 # Generate a default ops sequence if none was provided.
+                 max_llm_calls = policy_config_overrides.get("max_llm_calls", 10)
+                 ops_list = cfg.get("ops", [])
+                 if not ops_list:
+                     # Default to "agent, env" pairs, one per LLM call.
+                     ops_list = ["agent", "env"] * int(max_llm_calls)
+
+                 body = {
+                     "run_id": str(uuid.uuid4()),
+                     "env": {"config": {"split": split, "index": seed_val, **env_config_overrides}, "seed": seed_val},
+                     "policy": {
+                         "policy_name": policy_name or selected_model,
+                         "config": {"model": selected_model, **policy_overrides, **policy_config_overrides},
+                     },
+                     "ops": ops_list,
+                     "record": {
+                         "return_trace": cfg.get("return_trace", True),
+                         "trace_format": cfg.get("trace_format", "structured"),
+                     },
+                     "mode": "eval",  # RolloutMode.EVAL: use inference URLs as-is, no transformations
+                 }
+                 if env_name:
+                     body["env"]["env_name"] = env_name  # type: ignore[assignment]
+
+                 # Debug: print the body being sent.
+                 if seed_val == 0:
+                     click.echo(f"[DEBUG] rollout body env: {body['env']}")
+                     click.echo(f"[DEBUG] rollout body policy: {body['policy']}")
+                     click.echo(f"[DEBUG] rollout body mode: {body.get('mode', 'NOT SET')}")
+                     click.echo(f"[DEBUG] rollout record payload: {body.get('record')}")
+                 rollout_elapsed: float | None = None
+                 rollout_start = time.perf_counter()
+                 try:
+                     import logging
+
+                     _log = logging.getLogger(__name__)
+                     _log.info(f"[EVAL_BODY_DEBUG] Sending body with mode={body.get('mode')}")
+                     async with semaphore:
+                         response = await async_client.post("/rollout", json=body)
+                     rollout_elapsed = time.perf_counter() - rollout_start
+                 except Exception as exc:
+                     failures += 1
+                     click.echo(f"seed={seed_val} error={exc}")
+                     return
+
+                 ok = 200 <= response.status_code < 300
+                 if ok:
+                     successes += 1
+                 else:
+                     failures += 1
+
+                 summary = [f"seed={seed_val}", f"status={response.status_code}"]
+                 data: Any
+                 try:
+                     data = response.json()
+                 except Exception:
+                     data = None
+
+                 # Debug: print validation errors.
+                 if response.status_code == 422 and data:
+                     click.echo(f"[DEBUG] 422 Validation Error: {data}")
+
+                 metrics: dict[str, Any] | None = None
+                 completion: str | None = None
+                 prompt_index: int | None = None
+                 prompt_text: str | None = None
+                 task_id: str | None = None
+                 task_split: str | None = None
+                 task_rubric_id: str | None = None
+
+                 trace_namespace: dict[str, Any] | None = None
+                 session_trace_dict: dict[str, Any] | None = None
+
+                 if isinstance(data, dict):
+                     import logging
+
+                     _logger = logging.getLogger(__name__)
+                     _logger.info(f"[EVAL_DEBUG] Response data keys: {list(data.keys())}")
+                     if "detail" in data:
+                         _logger.error(f"[EVAL_DEBUG] Task app returned error: {data['detail']}")
+                     trace_namespace = data.get("trace")
+                     _logger.info(f"[EVAL_DEBUG] trace_namespace type: {type(trace_namespace)}, value: {trace_namespace if not isinstance(trace_namespace, dict) else 'dict with keys: ' + str(list(trace_namespace.keys()) if trace_namespace else 'None')}")
+                     if not isinstance(trace_namespace, dict):
+                         raise RuntimeError(
+                             "The 'synth-ai eval' command requires trace payloads in rollout responses. "
+                             "Ensure the rollout request includes 'trace_format': 'structured' and 'return_trace': true, "
+                             "and that task app tracing is enabled (TASKAPP_TRACING_ENABLED=1). "
+                             "Note: This is specific to the eval command - general rollout endpoints don't require traces."
+                         )
+                     # Handle both "compact" and "full" trace formats:
+                     # - compact: trace_namespace contains {session_id, metadata, ...}
+                     # - full: trace_namespace IS the full session_trace dict
+                     session_trace_dict = trace_namespace.get("session_trace")
+                     if not isinstance(session_trace_dict, dict):
+                         # No session_trace key: assume the "full" format where the trace itself is the session_trace.
+                         if "session_id" in trace_namespace:
+                             session_trace_dict = trace_namespace
+                         else:
+                             raise RuntimeError(
+                                 "The 'synth-ai eval' command requires 'session_trace' in the trace payload or a valid full trace format. "
+                                 "Ensure the task app is using tracing_v3 and returning structured trace data."
+                             )
+                     metrics = data.get("metrics") if isinstance(data.get("metrics"), dict) else None
+                     if metrics:
+                         mean_return = metrics.get("mean_return") or metrics.get("total_reward")
+                         outcome = metrics.get("outcome_score")
+                         if mean_return is not None:
+                             summary.append(f"mean_return={mean_return}")
+                         if outcome is not None:
+                             summary.append(f"outcome={outcome}")
+                             try:
+                                 val = float(outcome)
+                                 outcome_sum += val
+                                 outcome_count += 1
+                                 if val >= 0.5:
+                                     outcome_correct += 1
+                             except Exception:
+                                 pass
+                     trajs = (
+                         data.get("trajectories")
+                         if isinstance(data.get("trajectories"), list)
+                         else None
+                     )
+                     if trajs:
+                         first = trajs[0] if trajs else None
+                         steps = first.get("steps") if isinstance(first, dict) else None
+                         if isinstance(steps, list) and steps:
+                             step0 = steps[0]
+                             tool_calls = step0.get("tool_calls") or step0.get("tools") or []
+                             if isinstance(tool_calls, list):
+                                 summary.append(f"tool_calls={len(tool_calls)}")
+                             obs = step0.get("obs") if isinstance(step0, dict) else None
+                             if isinstance(obs, dict):
+                                 idx_val = obs.get("prompt_index")
+                                 if isinstance(idx_val, int):
+                                     prompt_index = idx_val
+                                 prompt_raw = obs.get("prompt")
+                                 if isinstance(prompt_raw, str):
+                                     prompt_text = prompt_raw
+                                 if task_id is None:
+                                     candidate_id = obs.get("task_id")
+                                     if isinstance(candidate_id, str) and candidate_id:
+                                         task_id = candidate_id
+                                 if task_split is None:
+                                     candidate_split = obs.get("task_split")
+                                     if isinstance(candidate_split, str) and candidate_split:
+                                         task_split = candidate_split
+                                 if task_rubric_id is None:
+                                     candidate_rid = obs.get("task_rubric_id")
+                                     if isinstance(candidate_rid, str) and candidate_rid:
+                                         task_rubric_id = candidate_rid
+                         final = first.get("final") if isinstance(first, dict) else None
+                         if isinstance(final, dict):
+                             final_obs = final.get("observation")
+                             if isinstance(final_obs, dict):
+                                 comp_val = final_obs.get("completion")
+                                 if isinstance(comp_val, str):
+                                     completion = comp_val
+                                 if task_id is None:
+                                     candidate_id = final_obs.get("task_id")
+                                     if isinstance(candidate_id, str) and candidate_id:
+                                         task_id = candidate_id
+                                 if task_split is None:
+                                     candidate_split = final_obs.get("task_split")
+                                     if isinstance(candidate_split, str) and candidate_split:
+                                         task_split = candidate_split
+                                 if task_rubric_id is None:
+                                     candidate_rid = final_obs.get("task_rubric_id")
+                                     if isinstance(candidate_rid, str) and candidate_rid:
+                                         task_rubric_id = candidate_rid
+                             final_info = final.get("info")
+                             if isinstance(final_info, dict):
+                                 if task_id is None:
+                                     candidate_id = final_info.get("task_id")
+                                     if isinstance(candidate_id, str) and candidate_id:
+                                         task_id = candidate_id
+                                 if task_split is None:
+                                     candidate_split = final_info.get("task_split")
+                                     if isinstance(candidate_split, str) and candidate_split:
+                                         task_split = candidate_split
+                                 if task_rubric_id is None:
+                                     candidate_rid = final_info.get("task_rubric_id")
+                                     if isinstance(candidate_rid, str) and candidate_rid:
+                                         task_rubric_id = candidate_rid
+                     if task_id:
+                         summary.append(f"task_id={task_id}")
+                     click.echo(" ".join(summary))
+                     with contextlib.suppress(Exception):
+                         click.echo(json.dumps(data, indent=2))
+                 else:
+                     click.echo(" ".join(summary))
+
+                 official_score = None
+                 if isinstance(metrics, dict):
+                     for key in ("mean_return", "total_reward", "outcome_score"):
+                         val = metrics.get(key)
+                         if isinstance(val, int | float):
+                             official_score = float(val)
+                             break
+                 if official_score is None and isinstance(data, dict):
+                     try:
+                         reward_val = data["trajectories"][0]["steps"][0].get("reward")
+                         if isinstance(reward_val, int | float):
+                             official_score = float(reward_val)
+                     except Exception:
+                         pass
+
+                 if official_score is not None:
+                     if official_score < 0.0:
+                         official_score = 0.0
+                     elif official_score > 1.0:
+                         official_score = min(1.0, official_score)
+
+                 judge_scores: dict[str, float | None] = {}
+                 judges_timings: dict[str, float | None] = {}
+                 timings: dict[str, Any] = {
+                     "rollout_s": rollout_elapsed,
+                     "judges": judges_timings,
+                 }
+                 if judge_specs:
+                     for spec in judge_specs:
+                         score_value: float | None = None
+                         judge_elapsed: float | None = None
+                         # Run judges for all tasks (text-based and trajectory-based).
+                         # Text-based tasks have a completion; trajectory-based tasks use the response.
+                         judge_payload = {
+                             "seed": seed_val,
+                             "prompt_index": prompt_index,
+                             "prompt": prompt_text,
+                             "completion": completion,
+                             "metrics": metrics,
+                             "response": data,
+                             "trace": trace_namespace,
+                         }
+                         try:
+                             judge_start = time.perf_counter()
+                             result = spec.fn(judge_payload, **spec.kwargs)
+                             judge_elapsed = time.perf_counter() - judge_start
+                             if isinstance(result, int | float):
+                                 score_value = float(result)
+                         except Exception as exc:
+                             if judge_elapsed is None:
+                                 judge_elapsed = time.perf_counter() - judge_start
+                             click.echo(f"seed={seed_val} judge[{spec.name}]_error={exc}")
+                         judges_timings[spec.name] = judge_elapsed
+                         judge_scores[spec.name] = score_value
+
+                 if trace_tracer is not None and trace_namespace:
+                     storage_metadata = {
+                         "eval_seed": seed_val,
+                         "prompt_index": prompt_index,
+                         "task_id": task_id,
+                         "task_split": task_split,
+                         "task_rubric_id": task_rubric_id,
+                         "official_score": official_score,
+                         "judge_scores": judge_scores,
+                         "model": selected_model,
+                         "prompt": prompt_text,
+                         "completion": completion,
+                     }
+                     if store_trace is not None:
+                         await store_trace(trace_tracer, trace_namespace, storage_metadata)
+
+                 records.append(
+                     {
+                         "seed": seed_val,
+                         "prompt_index": prompt_index,
+                         "task_id": task_id,
+                         "task_split": task_split,
+                         "task_rubric_id": task_rubric_id,
+                         "official_score": official_score,
+                         "judge_scores": judge_scores,
+                         "timings": timings,
+                     }
+                 )
+
+             await asyncio.gather(*[_run_seed(seed_val) for seed_val in seed_values])
+         finally:
+             await async_client.aclose()
+
+     try:
+         asyncio.run(_run_eval())
+     finally:
+         if trace_tracer is not None and trace_tracer.db is not None:
+             asyncio.run(trace_tracer.db.close())
+
+     click.echo(
+         f"Eval complete: {successes} ok, {failures} failed; model={selected_model}, split={split}"
+     )
+
+     if outcome_count > 0:
+         mean_outcome = outcome_sum / float(outcome_count)
+         frac_right = outcome_correct / float(outcome_count)
+         click.echo(
+             f"Outcome summary: correct={outcome_correct}/{outcome_count} ({frac_right:.2%}), mean_outcome={mean_outcome:.3f}"
+         )
+
+     if records:
+         judge_specs = judge_specs or []  # ensure iterable
+         official_scores = [
+             r["official_score"] for r in records if r["official_score"] is not None
+         ]
+         if official_scores:
+             click.echo(f" Official mean: {sum(official_scores) / len(official_scores):.3f}")
+         else:
+             click.echo(" Official mean: n/a")
+
+         for spec in judge_specs:
+             spec_scores = [
+                 record["judge_scores"].get(spec.name)
+                 for record in records
+                 if record["judge_scores"].get(spec.name) is not None
+             ]
+             if spec_scores:
+                 mean_spec = sum(spec_scores) / len(spec_scores)
+                 click.echo(f" [{spec.name}] mean: {mean_spec:.3f}")
+             else:
+                 click.echo(f" [{spec.name}] mean: n/a")
+
+             paired = [
+                 (
+                     record["official_score"],
+                     record["judge_scores"].get(spec.name),
+                 )
+                 for record in records
+                 if record["official_score"] is not None
+                 and record["judge_scores"].get(spec.name) is not None
+             ]
+             if len(paired) >= 2:
+                 corr = pearson(
+                     [p[0] for p in paired if p[0] is not None],
+                     [p[1] for p in paired if p[1] is not None],
+                 )
+                 if corr is not None:
+                     click.echo(f" Pearson r: {corr:.3f}")
+                 else:
+                     click.echo(" Pearson r: undefined (zero variance)")
+             else:
+                 click.echo(" Pearson r: n/a (need ≥2 paired scores)")
+
+         header = ["Seed", "Prompt", "Official"]
+         header.extend(spec.name for spec in judge_specs)
+         rows: list[list[str]] = []
+         for record in sorted(records, key=lambda r: (r["seed"], r.get("prompt_index") or -1)):
+             seed_val = str(record["seed"])
+             prompt_idx = (
+                 str(record["prompt_index"])
+                 if record["prompt_index"] is not None
+                 else "-"
+             )
+             official_val = (
+                 f"{record['official_score']:.3f}"
+                 if record["official_score"] is not None
+                 else "-"
+             )
+             row = [seed_val, prompt_idx, official_val]
+             for spec in judge_specs:
+                 score_val = record["judge_scores"].get(spec.name)
+                 row.append(f"{score_val:.3f}" if isinstance(score_val, int | float) else "-")
+             rows.append(row)
+
+         widths = [len(col) for col in header]
+         for row in rows:
+             for idx, cell in enumerate(row):
+                 widths[idx] = max(widths[idx], len(cell))
+
+         click.echo("")
+         click.echo(" ".join(h.ljust(widths[idx]) for idx, h in enumerate(header)))
+         click.echo(" ".join("-" * widths[idx] for idx in range(len(header))))
+         for row in rows:
+             click.echo(" ".join(cell.ljust(widths[idx]) for idx, cell in enumerate(row)))
+
+
+ command = eval_command
+
+
+ def get_command() -> click.Command:
+     """Return the Click command implementing task-app evaluation."""
+     return command
+
+
+ def format_eval_error(err: EvalCliError) -> str:
+     if isinstance(err, TomlUnavailableError):
+         hint = err.hint or "Install tomli or use Python 3.11+."
+         return f"TOML parser not available. {hint}"
+     if isinstance(err, EvalConfigNotFoundError):
+         return f"Eval config not found: {err.path}"
+     if isinstance(err, EvalConfigParseError):
+         return f"Failed to parse TOML '{err.path}': {err.detail}"
+     if isinstance(err, MissingEvalTableError):
+         return "Config must contain an [eval] table."
+     if isinstance(err, InvalidEvalConfigError):
+         return f"Invalid eval config: {err.detail}"
+     if isinstance(err, SeedParseError):
+         return f"Unable to parse seeds from '{err.value}'. Provide comma-separated integers."
+     if isinstance(err, MetadataFilterFormatError):
+         return f"Metadata filter '{err.entry}' must be key=value."
+     if isinstance(err, TaskInfoUnavailableError):
+         return "Task metadata filters require the task app to expose /task_info metadata."
+     if isinstance(err, NoSeedsMatchedError):
+         hint = err.hint or "Adjust the metadata filters or seed list."
+         return f"No seeds match the provided metadata filters. {hint}"
+     if isinstance(err, MetadataSQLExecutionError):
+         return f"Failed to execute metadata SQL query '{err.query}': {err.detail}"
+     if isinstance(err, MetadataSQLResultError):
+         return f"Metadata SQL query '{err.query}' must return integer seed values ({err.detail})."
+     return str(err)
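
For orientation, here is a hypothetical eval TOML exercising the short schema this new `core.py` parses. Every key maps to a `cfg.get(...)` lookup in `_eval_command_impl` above, but the app id, model, paths, and values are invented, and it assumes `validate_eval_options` accepts this shape:

    # eval.toml (hypothetical example, not shipped with the release)
    [eval]
    app_id = "grpo-crafter"        # optional; the CLI argument takes precedence
    model = "groq:llama-3.1-70b-versatile"
    seeds = [0, 1, 2, 3, 4]
    env_file = [".env"]
    concurrency = 4
    return_trace = true
    trace_format = "structured"
    metadata_sql = "SELECT seed FROM tasks WHERE difficulty='easy' LIMIT 5"

    [eval.policy]                  # merged into the rollout's policy config
    temperature = 0.2
    max_tokens = 512

    [eval.judge.brevity]           # registered by _register_judge as judge "brevity"
    path = "judges/brevity.py"     # the module must expose a 'judge' callable
    max_chars = 2000               # unrecognized keys are forwarded as judge kwargs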
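
And a minimal sketch of the judge module that config points at, assuming only the payload contract visible in `_run_seed` (the filename and scoring heuristic are made up):

    # judges/brevity.py (hypothetical custom judge for the sketch above)
    from __future__ import annotations

    from typing import Any


    def judge(payload: dict[str, Any], max_chars: int = 2000, **_: Any) -> float:
        """Score in [0, 1], decaying once the completion exceeds max_chars.

        payload carries the keys _run_seed passes to spec.fn: seed, prompt_index,
        prompt, completion, metrics, response, and trace. Extra TOML keys such as
        max_chars arrive as keyword arguments via the JudgeSpec kwargs.
        """
        completion = payload.get("completion") or ""
        if not completion:
            return 0.0
        overflow = max(0, len(completion) - int(max_chars))
        return max(0.0, 1.0 - overflow / float(max_chars))

With both files in place, something like `synth-ai eval grpo-crafter --config eval.toml --trace-db none` would run the five seeds, print the per-seed summaries, and add a `brevity` column to the final score table.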