synth-ai 0.2.16__py3-none-any.whl → 0.2.19__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of synth-ai might be problematic; review the release details before upgrading.
- examples/analyze_semantic_words.sh +2 -2
- examples/baseline/banking77_baseline.py +204 -0
- examples/baseline/crafter_baseline.py +407 -0
- examples/baseline/pokemon_red_baseline.py +326 -0
- examples/baseline/simple_baseline.py +56 -0
- examples/baseline/warming_up_to_rl_baseline.py +239 -0
- examples/blog_posts/gepa/README.md +355 -0
- examples/blog_posts/gepa/configs/banking77_gepa_local.toml +95 -0
- examples/blog_posts/gepa/configs/banking77_gepa_test.toml +82 -0
- examples/blog_posts/gepa/configs/banking77_mipro_local.toml +52 -0
- examples/blog_posts/gepa/configs/hotpotqa_gepa_local.toml +59 -0
- examples/blog_posts/gepa/configs/hotpotqa_gepa_qwen.toml +36 -0
- examples/blog_posts/gepa/configs/hotpotqa_mipro_local.toml +53 -0
- examples/blog_posts/gepa/configs/hover_gepa_local.toml +59 -0
- examples/blog_posts/gepa/configs/hover_gepa_qwen.toml +36 -0
- examples/blog_posts/gepa/configs/hover_mipro_local.toml +53 -0
- examples/blog_posts/gepa/configs/ifbench_gepa_local.toml +59 -0
- examples/blog_posts/gepa/configs/ifbench_gepa_qwen.toml +36 -0
- examples/blog_posts/gepa/configs/ifbench_mipro_local.toml +53 -0
- examples/blog_posts/gepa/configs/pupa_gepa_local.toml +60 -0
- examples/blog_posts/gepa/configs/pupa_mipro_local.toml +54 -0
- examples/blog_posts/gepa/deploy_banking77_task_app.sh +41 -0
- examples/blog_posts/gepa/gepa_baseline.py +204 -0
- examples/blog_posts/gepa/query_prompts_example.py +97 -0
- examples/blog_posts/gepa/run_gepa_banking77.sh +87 -0
- examples/blog_posts/gepa/task_apps.py +105 -0
- examples/blog_posts/gepa/test_gepa_local.sh +67 -0
- examples/blog_posts/gepa/verify_banking77_setup.sh +123 -0
- examples/blog_posts/pokemon_vl/README.md +98 -0
- examples/blog_posts/pokemon_vl/configs/eval_gpt5nano.toml +26 -0
- examples/blog_posts/pokemon_vl/configs/eval_qwen3_vl.toml +27 -0
- examples/blog_posts/pokemon_vl/configs/eval_rl_final.toml +24 -0
- examples/blog_posts/pokemon_vl/configs/filter_high_reward.toml +10 -0
- examples/blog_posts/pokemon_vl/configs/train_rl_from_sft.toml +43 -0
- examples/blog_posts/pokemon_vl/configs/train_sft_qwen4b_vl.toml +40 -0
- examples/blog_posts/pokemon_vl/extract_images.py +239 -0
- examples/blog_posts/pokemon_vl/pokemon_vl_baseline.py +326 -0
- examples/blog_posts/pokemon_vl/run_eval_extract_images.py +209 -0
- examples/blog_posts/pokemon_vl/run_qwen_eval_extract_images.py +212 -0
- examples/blog_posts/pokemon_vl/text_box_analysis.md +106 -0
- examples/blog_posts/warming_up_to_rl/ARCHITECTURE.md +195 -0
- examples/blog_posts/warming_up_to_rl/FINAL_TEST_RESULTS.md +127 -0
- examples/blog_posts/warming_up_to_rl/INFERENCE_SUCCESS.md +132 -0
- examples/blog_posts/warming_up_to_rl/README.md +158 -0
- examples/blog_posts/warming_up_to_rl/SMOKE_TESTING.md +164 -0
- examples/blog_posts/warming_up_to_rl/SMOKE_TEST_COMPLETE.md +253 -0
- examples/blog_posts/warming_up_to_rl/configs/eval_baseline_qwen32b_10x20.toml +25 -0
- examples/blog_posts/warming_up_to_rl/configs/eval_ft_qwen4b.toml +25 -0
- examples/blog_posts/warming_up_to_rl/configs/eval_ft_qwen4b_10x20.toml +26 -0
- examples/blog_posts/warming_up_to_rl/configs/eval_groq_qwen32b.toml +25 -0
- examples/blog_posts/warming_up_to_rl/configs/eval_openai_gpt_oss_120b.toml +29 -0
- examples/blog_posts/warming_up_to_rl/configs/filter_high_reward_dataset.toml +10 -0
- examples/blog_posts/warming_up_to_rl/configs/smoke_test.toml +75 -0
- examples/blog_posts/warming_up_to_rl/configs/train_rl_from_sft.toml +91 -0
- examples/blog_posts/warming_up_to_rl/configs/train_sft_qwen4b.toml +40 -0
- examples/blog_posts/warming_up_to_rl/warming_up_to_rl_baseline.py +187 -0
- examples/dev/qwen3_32b_qlora_4xh100.toml +5 -0
- examples/multi_step/configs/VERILOG_REWARDS.md +4 -0
- examples/multi_step/configs/VERILOG_RL_CHECKLIST.md +4 -0
- examples/multi_step/configs/crafter_rl_outcome.toml +2 -1
- examples/multi_step/configs/crafter_rl_stepwise_hosted_judge.toml +65 -107
- examples/multi_step/configs/crafter_rl_stepwise_shaped.toml +2 -1
- examples/multi_step/configs/crafter_rl_stepwise_simple.toml +2 -1
- examples/multi_step/configs/crafter_rl_stepwise_simple_NEW_FORMAT.toml +105 -0
- examples/multi_step/configs/verilog_rl_lora.toml +80 -123
- examples/qwen_coder/configs/coder_lora_30b.toml +1 -3
- examples/qwen_coder/configs/coder_lora_4b.toml +4 -1
- examples/qwen_coder/configs/coder_lora_small.toml +1 -3
- examples/qwen_vl/README.md +10 -12
- examples/qwen_vl/SETUP_COMPLETE.md +7 -8
- examples/qwen_vl/VISION_TESTS_COMPLETE.md +2 -3
- examples/qwen_vl/collect_data_via_cli.md +76 -84
- examples/qwen_vl/collect_vision_traces.py +4 -4
- examples/qwen_vl/configs/crafter_rl_vision_qwen3vl4b.toml +40 -57
- examples/qwen_vl/configs/crafter_vlm_sft_example.toml +1 -2
- examples/qwen_vl/configs/eval_gpt4o_mini_vision.toml +20 -37
- examples/qwen_vl/configs/eval_gpt5nano_vision.toml +21 -40
- examples/qwen_vl/configs/eval_qwen3vl_vision.toml +26 -0
- examples/qwen_vl/configs/{filter_qwen2vl_sft.toml → filter_qwen3vl_sft.toml} +4 -5
- examples/qwen_vl/configs/filter_vision_sft.toml +2 -3
- examples/qwen_vl/crafter_qwen_vl_agent.py +5 -5
- examples/qwen_vl/run_vision_comparison.sh +6 -7
- examples/rl/README.md +5 -5
- examples/rl/configs/rl_from_base_qwen.toml +26 -1
- examples/rl/configs/rl_from_base_qwen17.toml +6 -2
- examples/rl/task_app/README.md +1 -2
- examples/rl/task_app/math_single_step.py +2 -2
- examples/run_crafter_demo.sh +2 -2
- examples/sft/README.md +1 -1
- examples/sft/configs/crafter_fft_qwen0p6b.toml +4 -1
- examples/sft/configs/crafter_lora_qwen0p6b.toml +4 -1
- examples/swe/task_app/README.md +32 -2
- examples/swe/task_app/grpo_swe_mini.py +4 -0
- examples/swe/task_app/hosted/envs/crafter/react_agent.py +1 -1
- examples/swe/task_app/hosted/envs/mini_swe/environment.py +37 -10
- examples/swe/task_app/hosted/inference/openai_client.py +4 -38
- examples/swe/task_app/hosted/policy_routes.py +17 -0
- examples/swe/task_app/hosted/rollout.py +4 -2
- examples/swe/task_app/morph_backend.py +178 -0
- examples/task_apps/banking77/__init__.py +6 -0
- examples/task_apps/banking77/banking77_task_app.py +841 -0
- examples/task_apps/banking77/deploy_wrapper.py +46 -0
- examples/task_apps/crafter/CREATE_SFT_DATASET.md +4 -0
- examples/task_apps/crafter/FILTER_COMMAND_STATUS.md +4 -0
- examples/task_apps/crafter/FILTER_COMMAND_SUCCESS.md +4 -0
- examples/task_apps/crafter/task_app/README.md +1 -1
- examples/task_apps/crafter/task_app/grpo_crafter.py +90 -5
- examples/task_apps/crafter/task_app/grpo_crafter_task_app.py +1 -1
- examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/policy.py +4 -26
- examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/react_agent.py +1 -2
- examples/task_apps/crafter/task_app/synth_envs_hosted/hosted_app.py +49 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/inference/openai_client.py +372 -107
- examples/task_apps/crafter/task_app/synth_envs_hosted/policy_routes.py +81 -12
- examples/task_apps/crafter/task_app/synth_envs_hosted/rollout.py +82 -11
- examples/task_apps/crafter/task_app/synth_envs_hosted/utils.py +194 -1
- examples/task_apps/enron/task_app/grpo_enron_task_app.py +1 -1
- examples/task_apps/gepa_benchmarks/__init__.py +7 -0
- examples/task_apps/gepa_benchmarks/common.py +260 -0
- examples/task_apps/gepa_benchmarks/hotpotqa_task_app.py +507 -0
- examples/task_apps/gepa_benchmarks/hover_task_app.py +436 -0
- examples/task_apps/gepa_benchmarks/ifbench_task_app.py +563 -0
- examples/task_apps/gepa_benchmarks/pupa_task_app.py +460 -0
- examples/task_apps/math/README.md +1 -2
- examples/task_apps/pokemon_red/README.md +3 -4
- examples/task_apps/pokemon_red/README_IMAGE_ONLY_EVAL.md +4 -0
- examples/task_apps/pokemon_red/eval_image_only_gpt4o.toml +6 -5
- examples/task_apps/pokemon_red/eval_pokemon_red_policy.py +1 -2
- examples/task_apps/pokemon_red/task_app.py +288 -39
- examples/task_apps/sokoban/README.md +2 -3
- examples/task_apps/verilog/eval_groq_qwen32b.toml +12 -14
- examples/task_apps/verilog/task_app/grpo_verilog_task_app.py +1 -1
- examples/vlm/configs/crafter_vlm_gpt4o.toml +4 -1
- examples/warming_up_to_rl/configs/crafter_fft.toml +4 -1
- examples/warming_up_to_rl/configs/crafter_fft_4b.toml +0 -2
- examples/warming_up_to_rl/configs/rl_from_base_qwen4b.toml +3 -2
- examples/warming_up_to_rl/run_local_rollout_traced.py +1 -1
- examples/warming_up_to_rl/task_app/README.md +1 -1
- examples/warming_up_to_rl/task_app/grpo_crafter.py +185 -5
- examples/warming_up_to_rl/task_app/grpo_crafter_task_app.py +1 -1
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/policy.py +3 -27
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/react_agent.py +1 -1
- examples/warming_up_to_rl/task_app/synth_envs_hosted/hosted_app.py +49 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/inference/openai_client.py +156 -45
- examples/warming_up_to_rl/task_app/synth_envs_hosted/policy_routes.py +37 -4
- examples/warming_up_to_rl/task_app/synth_envs_hosted/rollout.py +33 -3
- examples/warming_up_to_rl/task_app/synth_envs_hosted/utils.py +67 -0
- examples/workflows/math_rl/configs/rl_from_base_qwen.toml +27 -0
- examples/workflows/math_rl/configs/rl_from_base_qwen17.toml +6 -0
- synth_ai/api/train/builders.py +99 -4
- synth_ai/api/train/cli.py +516 -26
- synth_ai/api/train/config_finder.py +13 -2
- synth_ai/api/train/configs/__init__.py +23 -2
- synth_ai/api/train/configs/prompt_learning.py +442 -0
- synth_ai/api/train/configs/rl.py +61 -7
- synth_ai/api/train/configs/sft.py +6 -2
- synth_ai/api/train/configs/shared.py +59 -2
- synth_ai/api/train/task_app.py +1 -1
- synth_ai/api/train/validators.py +277 -0
- synth_ai/auth/credentials.py +119 -0
- synth_ai/baseline/__init__.py +25 -0
- synth_ai/baseline/config.py +209 -0
- synth_ai/baseline/discovery.py +214 -0
- synth_ai/baseline/execution.py +146 -0
- synth_ai/cli/__init__.py +94 -18
- synth_ai/cli/__main__.py +0 -0
- synth_ai/cli/claude.py +70 -0
- synth_ai/cli/codex.py +84 -0
- synth_ai/cli/commands/__init__.py +18 -0
- synth_ai/cli/commands/baseline/__init__.py +12 -0
- synth_ai/cli/commands/baseline/core.py +637 -0
- synth_ai/cli/commands/baseline/list.py +93 -0
- synth_ai/cli/commands/demo/__init__.py +6 -0
- synth_ai/cli/commands/demo/core.py +163 -0
- synth_ai/cli/commands/eval/__init__.py +19 -0
- synth_ai/cli/commands/eval/core.py +1112 -0
- synth_ai/cli/commands/eval/errors.py +81 -0
- synth_ai/cli/commands/eval/validation.py +133 -0
- synth_ai/cli/commands/filter/__init__.py +12 -0
- synth_ai/cli/commands/filter/core.py +424 -0
- synth_ai/cli/commands/filter/errors.py +55 -0
- synth_ai/cli/commands/filter/validation.py +77 -0
- synth_ai/cli/commands/help/__init__.py +177 -0
- synth_ai/cli/commands/help/core.py +72 -0
- synth_ai/cli/commands/smoke/__init__.py +7 -0
- synth_ai/cli/commands/smoke/core.py +1436 -0
- synth_ai/cli/commands/status/__init__.py +64 -0
- synth_ai/cli/commands/status/client.py +192 -0
- synth_ai/cli/commands/status/config.py +92 -0
- synth_ai/cli/commands/status/errors.py +20 -0
- synth_ai/cli/commands/status/formatters.py +164 -0
- synth_ai/cli/commands/status/subcommands/__init__.py +9 -0
- synth_ai/cli/commands/status/subcommands/files.py +79 -0
- synth_ai/cli/commands/status/subcommands/jobs.py +334 -0
- synth_ai/cli/commands/status/subcommands/models.py +79 -0
- synth_ai/cli/commands/status/subcommands/pricing.py +22 -0
- synth_ai/cli/commands/status/subcommands/runs.py +81 -0
- synth_ai/cli/commands/status/subcommands/summary.py +47 -0
- synth_ai/cli/commands/status/subcommands/usage.py +203 -0
- synth_ai/cli/commands/status/utils.py +114 -0
- synth_ai/cli/commands/train/__init__.py +53 -0
- synth_ai/cli/commands/train/core.py +21 -0
- synth_ai/cli/commands/train/errors.py +117 -0
- synth_ai/cli/commands/train/judge_schemas.py +200 -0
- synth_ai/cli/commands/train/judge_validation.py +305 -0
- synth_ai/cli/commands/train/validation.py +386 -0
- synth_ai/cli/demo.py +30 -158
- synth_ai/cli/deploy/__init__.py +43 -0
- synth_ai/cli/deploy.py +162 -0
- synth_ai/cli/eval/__init__.py +36 -0
- synth_ai/cli/eval/core.py +5 -0
- synth_ai/cli/eval/errors.py +31 -0
- synth_ai/cli/eval/validation.py +5 -0
- synth_ai/cli/filter/__init__.py +28 -0
- synth_ai/cli/filter/core.py +5 -0
- synth_ai/cli/filter/errors.py +23 -0
- synth_ai/cli/filter/validation.py +5 -0
- synth_ai/cli/legacy_root_backup.py +14 -8
- synth_ai/cli/modal_serve/__init__.py +12 -0
- synth_ai/cli/modal_serve/core.py +14 -0
- synth_ai/cli/modal_serve/errors.py +8 -0
- synth_ai/cli/modal_serve/validation.py +11 -0
- synth_ai/cli/opencode.py +107 -0
- synth_ai/cli/root.py +9 -5
- synth_ai/cli/serve/__init__.py +12 -0
- synth_ai/cli/serve/core.py +14 -0
- synth_ai/cli/serve/errors.py +8 -0
- synth_ai/cli/serve/validation.py +11 -0
- synth_ai/cli/setup.py +20 -265
- synth_ai/cli/status.py +7 -126
- synth_ai/cli/task_app_deploy.py +1 -10
- synth_ai/cli/task_app_modal_serve.py +4 -9
- synth_ai/cli/task_app_serve.py +4 -11
- synth_ai/cli/task_apps.py +51 -1480
- synth_ai/cli/train/__init__.py +12 -0
- synth_ai/cli/train/core.py +21 -0
- synth_ai/cli/train/errors.py +8 -0
- synth_ai/cli/train/validation.py +24 -0
- synth_ai/cli/train.py +1 -14
- synth_ai/demos/crafter/grpo_crafter_task_app.py +1 -1
- synth_ai/demos/demo_task_apps/crafter/grpo_crafter_task_app.py +1 -1
- synth_ai/environments/examples/crafter_classic/engine_deterministic_patch.py +7 -4
- synth_ai/environments/examples/crafter_classic/engine_serialization_patch_v3.py +9 -5
- synth_ai/environments/examples/crafter_classic/world_config_patch_simple.py +4 -3
- synth_ai/environments/examples/red/engine.py +33 -12
- synth_ai/environments/examples/red/engine_helpers/reward_components.py +151 -179
- synth_ai/environments/examples/red/environment.py +26 -0
- synth_ai/environments/examples/red/trace_hooks_v3.py +168 -0
- synth_ai/http.py +12 -0
- synth_ai/judge_schemas.py +10 -10
- synth_ai/learning/__init__.py +10 -0
- synth_ai/learning/prompt_learning_client.py +276 -0
- synth_ai/learning/prompt_learning_types.py +184 -0
- synth_ai/learning/rl/client.py +3 -1
- synth_ai/pricing/__init__.py +2 -0
- synth_ai/pricing/model_pricing.py +57 -0
- synth_ai/streaming/__init__.py +29 -0
- synth_ai/streaming/config.py +94 -0
- synth_ai/streaming/handlers.py +518 -0
- synth_ai/streaming/streamer.py +320 -0
- synth_ai/streaming/types.py +95 -0
- synth_ai/task/apps/__init__.py +1 -0
- synth_ai/task/config.py +2 -0
- synth_ai/task/tracing_utils.py +25 -25
- synth_ai/task/validators.py +45 -9
- synth_ai/task_app_cfgs.py +21 -0
- synth_ai/tracing_v3/config.py +162 -19
- synth_ai/tracing_v3/constants.py +1 -1
- synth_ai/tracing_v3/db_config.py +24 -38
- synth_ai/tracing_v3/migration_helper.py +1 -2
- synth_ai/tracing_v3/storage/config.py +47 -13
- synth_ai/tracing_v3/storage/factory.py +3 -3
- synth_ai/tracing_v3/turso/daemon.py +113 -11
- synth_ai/tracing_v3/turso/native_manager.py +92 -16
- synth_ai/types.py +8 -0
- synth_ai/urls.py +11 -0
- synth_ai/utils/__init__.py +30 -1
- synth_ai/utils/agents.py +74 -0
- synth_ai/utils/bin.py +39 -0
- synth_ai/utils/cli.py +149 -5
- synth_ai/utils/env.py +40 -33
- synth_ai/utils/http.py +4 -1
- synth_ai/utils/json.py +72 -0
- synth_ai/utils/modal.py +285 -3
- synth_ai/utils/paths.py +48 -0
- synth_ai/utils/uvicorn.py +113 -0
- {synth_ai-0.2.16.dist-info → synth_ai-0.2.19.dist-info}/METADATA +109 -6
- {synth_ai-0.2.16.dist-info → synth_ai-0.2.19.dist-info}/RECORD +291 -142
- examples/qwen_vl/configs/eval_qwen2vl_vision.toml +0 -44
- synth_ai/cli/tui.py +0 -62
- synth_ai/tui/__init__.py +0 -5
- synth_ai/tui/__main__.py +0 -13
- synth_ai/tui/cli/__init__.py +0 -1
- synth_ai/tui/cli/query_experiments.py +0 -164
- synth_ai/tui/cli/query_experiments_v3.py +0 -164
- synth_ai/tui/dashboard.py +0 -911
- {synth_ai-0.2.16.dist-info → synth_ai-0.2.19.dist-info}/WHEEL +0 -0
- {synth_ai-0.2.16.dist-info → synth_ai-0.2.19.dist-info}/entry_points.txt +0 -0
- {synth_ai-0.2.16.dist-info → synth_ai-0.2.19.dist-info}/licenses/LICENSE +0 -0
- {synth_ai-0.2.16.dist-info → synth_ai-0.2.19.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,1112 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import contextlib
|
|
5
|
+
import importlib
|
|
6
|
+
import importlib.util
|
|
7
|
+
import json
|
|
8
|
+
import os
|
|
9
|
+
import sqlite3
|
|
10
|
+
import sys
|
|
11
|
+
import time
|
|
12
|
+
import uuid
|
|
13
|
+
from collections.abc import Sequence
|
|
14
|
+
from functools import lru_cache
|
|
15
|
+
from pathlib import Path
|
|
16
|
+
from typing import TYPE_CHECKING, Any, cast
|
|
17
|
+
|
|
18
|
+
import click
|
|
19
|
+
from synth_ai.task.config import EvalConfig
|
|
20
|
+
from synth_ai.tracing_v3.session_tracer import SessionTracer
|
|
21
|
+
from synth_ai.utils.task_app_discovery import discover_eval_config_paths
|
|
22
|
+
|
|
23
|
+
from .errors import (
|
|
24
|
+
EvalCliError,
|
|
25
|
+
EvalConfigNotFoundError,
|
|
26
|
+
EvalConfigParseError,
|
|
27
|
+
InvalidEvalConfigError,
|
|
28
|
+
MetadataFilterFormatError,
|
|
29
|
+
MetadataSQLExecutionError,
|
|
30
|
+
MetadataSQLResultError,
|
|
31
|
+
MissingEvalTableError,
|
|
32
|
+
NoSeedsMatchedError,
|
|
33
|
+
SeedParseError,
|
|
34
|
+
TaskInfoUnavailableError,
|
|
35
|
+
TomlUnavailableError,
|
|
36
|
+
)
|
|
37
|
+
from .validation import validate_eval_options
|
|
38
|
+
|
|
39
|
+
try: # Python 3.11+
|
|
40
|
+
import tomllib as _toml
|
|
41
|
+
except Exception: # pragma: no cover - fallback
|
|
42
|
+
_toml = None # type: ignore[assignment]
|
|
43
|
+
|
|
44
|
+
__all__ = ["command", "get_command", "format_eval_error"]
|
|
45
|
+
|
|
46
|
+
if TYPE_CHECKING:
|
|
47
|
+
from synth_ai.cli.task_apps import AppChoice, TaskAppEntryType
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
@lru_cache(maxsize=1)
|
|
51
|
+
def _task_apps_module():
|
|
52
|
+
from synth_ai.cli import task_apps as module # local import to avoid circular deps
|
|
53
|
+
|
|
54
|
+
return module
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
@click.command(
|
|
58
|
+
"eval",
|
|
59
|
+
help="Run one-off rollouts against a task app and print judge/eval summaries.",
|
|
60
|
+
)
|
|
61
|
+
@click.argument("app_id", type=str, required=False)
|
|
62
|
+
@click.option(
|
|
63
|
+
"--config",
|
|
64
|
+
type=click.Path(),
|
|
65
|
+
default=None,
|
|
66
|
+
help="Path to eval TOML (short schema). Auto-discovers the first matching file when omitted.",
|
|
67
|
+
)
|
|
68
|
+
@click.option(
|
|
69
|
+
"--url",
|
|
70
|
+
"task_app_url",
|
|
71
|
+
type=str,
|
|
72
|
+
default=None,
|
|
73
|
+
help="Base URL of a running task app instead of spawning locally (requires --env-file for secrets).",
|
|
74
|
+
)
|
|
75
|
+
@click.option(
|
|
76
|
+
"--seeds",
|
|
77
|
+
default="0,1,2,3,4",
|
|
78
|
+
help="Comma-separated seeds/indices to evaluate. Use negative numbers to wrap around the dataset.",
|
|
79
|
+
)
|
|
80
|
+
@click.option("--split", default="train", show_default=True, help="Dataset split to use")
|
|
81
|
+
@click.option(
|
|
82
|
+
"--model",
|
|
83
|
+
default=None,
|
|
84
|
+
help="Model identifier. When omitted the CLI will prompt based on task metadata.",
|
|
85
|
+
)
|
|
86
|
+
@click.option(
|
|
87
|
+
"--env-file",
|
|
88
|
+
multiple=True,
|
|
89
|
+
type=click.Path(),
|
|
90
|
+
help="Env file(s) to load (API keys, etc.). Required when using --url or remote judges.",
|
|
91
|
+
)
|
|
92
|
+
@click.option(
|
|
93
|
+
"--trace-db",
|
|
94
|
+
default="traces/v3/synth_ai.db",
|
|
95
|
+
show_default=True,
|
|
96
|
+
help="SQLite/Turso URL for storing rollout traces set to 'none' to disable persistence.",
|
|
97
|
+
)
|
|
98
|
+
@click.option(
|
|
99
|
+
"--metadata",
|
|
100
|
+
multiple=True,
|
|
101
|
+
help="Filter tasks by key=value metadata (e.g., --metadata difficulty=easy)",
|
|
102
|
+
)
|
|
103
|
+
@click.option(
|
|
104
|
+
"--metadata-sql",
|
|
105
|
+
default=None,
|
|
106
|
+
help="SQLite query that returns seeds to evaluate (e.g., SELECT seed FROM tasks WHERE difficulty='easy' LIMIT 5)",
|
|
107
|
+
)
|
|
108
|
+
def eval_command(
|
|
109
|
+
app_id: str | None,
|
|
110
|
+
config: str | None,
|
|
111
|
+
task_app_url: str | None,
|
|
112
|
+
seeds: str,
|
|
113
|
+
split: str,
|
|
114
|
+
model: str | None,
|
|
115
|
+
env_file: Sequence[str],
|
|
116
|
+
trace_db: str,
|
|
117
|
+
metadata: Sequence[str],
|
|
118
|
+
metadata_sql: str | None,
|
|
119
|
+
) -> None:
|
|
120
|
+
try:
|
|
121
|
+
return _eval_command_impl(
|
|
122
|
+
app_id=app_id,
|
|
123
|
+
config=config,
|
|
124
|
+
task_app_url=task_app_url,
|
|
125
|
+
seeds=seeds,
|
|
126
|
+
split=split,
|
|
127
|
+
model=model,
|
|
128
|
+
env_file=env_file,
|
|
129
|
+
trace_db=trace_db,
|
|
130
|
+
metadata=metadata,
|
|
131
|
+
metadata_sql=metadata_sql,
|
|
132
|
+
)
|
|
133
|
+
except EvalCliError as exc:
|
|
134
|
+
raise click.ClickException(format_eval_error(exc)) from exc
|
|
135
|
+
|
|
136
|
+
|
|
137
|
+
def _eval_command_impl(
|
|
138
|
+
app_id: str | None,
|
|
139
|
+
config: str | None,
|
|
140
|
+
task_app_url: str | None,
|
|
141
|
+
seeds: str,
|
|
142
|
+
split: str,
|
|
143
|
+
model: str | None,
|
|
144
|
+
env_file: Sequence[str],
|
|
145
|
+
trace_db: str,
|
|
146
|
+
metadata: Sequence[str],
|
|
147
|
+
metadata_sql: str | None,
|
|
148
|
+
) -> None:
|
|
149
|
+
"""Run rollouts against a task app and report judge statistics.
|
|
150
|
+
|
|
151
|
+
By default the command spins up the selected task app in-process, executes the
|
|
152
|
+
requested seeds, and prints aggregate scores (official and custom judges). When
|
|
153
|
+
pointing at a remote `--url`, supply matching `--env-file` values so the CLI can
|
|
154
|
+
forward authentication headers to the running service.
|
|
155
|
+
"""
|
|
156
|
+
module = _task_apps_module()
|
|
157
|
+
task_app_config_type = module.TaskAppConfig
|
|
158
|
+
create_task_app = module.create_task_app
|
|
159
|
+
select_app_choice = module._select_app_choice
|
|
160
|
+
determine_env_files = module._determine_env_files
|
|
161
|
+
load_env_files_into_process = module._load_env_files_into_process
|
|
162
|
+
store_trace = getattr(module, "_store_trace", None)
|
|
163
|
+
pearson = module._pearson
|
|
164
|
+
judge_spec_cls = module.JudgeSpec
|
|
165
|
+
session_tracer_cls = getattr(module, "SessionTracer", None)
|
|
166
|
+
|
|
167
|
+
# Parse and validate TOML config
|
|
168
|
+
|
|
169
|
+
cfg: dict[str, Any] = {}
|
|
170
|
+
eval_cfg: EvalConfig | None = None
|
|
171
|
+
config_path: Path | None = None
|
|
172
|
+
|
|
173
|
+
if config:
|
|
174
|
+
config_path = Path(config)
|
|
175
|
+
else:
|
|
176
|
+
auto_configs = discover_eval_config_paths()
|
|
177
|
+
if auto_configs:
|
|
178
|
+
config_path = auto_configs[0]
|
|
179
|
+
click.echo(f"Using eval config: {config_path}")
|
|
180
|
+
|
|
181
|
+
if config_path:
|
|
182
|
+
if _toml is None:
|
|
183
|
+
raise TomlUnavailableError()
|
|
184
|
+
if not config_path.exists():
|
|
185
|
+
raise EvalConfigNotFoundError(str(config_path))
|
|
186
|
+
try:
|
|
187
|
+
data = config_path.read_bytes()
|
|
188
|
+
parsed = _toml.loads(data.decode("utf-8"))
|
|
189
|
+
if isinstance(parsed, dict):
|
|
190
|
+
section = parsed.get("eval")
|
|
191
|
+
if section is None:
|
|
192
|
+
cfg = dict(parsed)
|
|
193
|
+
elif isinstance(section, dict):
|
|
194
|
+
cfg = dict(section)
|
|
195
|
+
else:
|
|
196
|
+
raise MissingEvalTableError()
|
|
197
|
+
except Exception as exc:
|
|
198
|
+
raise EvalConfigParseError(path=str(config_path), detail=str(exc)) from exc
|
|
199
|
+
|
|
200
|
+
if cfg:
|
|
201
|
+
try:
|
|
202
|
+
normalized_cfg = validate_eval_options(cfg)
|
|
203
|
+
normalized_cfg_dict = dict(normalized_cfg)
|
|
204
|
+
eval_cfg = EvalConfig.from_dict(normalized_cfg_dict)
|
|
205
|
+
cfg = normalized_cfg_dict
|
|
206
|
+
click.echo(f"✓ Config validated: {len(eval_cfg.seeds)} seeds, model={eval_cfg.model}")
|
|
207
|
+
except (ValueError, TypeError) as validation_error:
|
|
208
|
+
raise InvalidEvalConfigError(detail=str(validation_error)) from validation_error
|
|
209
|
+
else:
|
|
210
|
+
cfg = {}
|
|
211
|
+
|
|
212
|
+
# CLI args override config
|
|
213
|
+
if eval_cfg:
|
|
214
|
+
app_id = app_id or eval_cfg.app_id
|
|
215
|
+
else:
|
|
216
|
+
app_id = app_id or (cfg.get("app_id") if isinstance(cfg.get("app_id"), str) else None) # type: ignore
|
|
217
|
+
|
|
218
|
+
metadata_filters: dict[str, str] = {}
|
|
219
|
+
if eval_cfg:
|
|
220
|
+
metadata_filters.update(eval_cfg.metadata)
|
|
221
|
+
else:
|
|
222
|
+
cfg_metadata = cfg.get("metadata")
|
|
223
|
+
if isinstance(cfg_metadata, dict):
|
|
224
|
+
for key, value in cfg_metadata.items():
|
|
225
|
+
metadata_filters[str(key)] = str(value)
|
|
226
|
+
elif isinstance(cfg_metadata, list):
|
|
227
|
+
for item in cfg_metadata:
|
|
228
|
+
if isinstance(item, str) and "=" in item:
|
|
229
|
+
key, value = item.split("=", 1)
|
|
230
|
+
metadata_filters[key.strip()] = value.strip()
|
|
231
|
+
|
|
232
|
+
for item in metadata or ():
|
|
233
|
+
if "=" not in item:
|
|
234
|
+
raise MetadataFilterFormatError(entry=item)
|
|
235
|
+
key, value = item.split("=", 1)
|
|
236
|
+
key = key.strip()
|
|
237
|
+
value = value.strip()
|
|
238
|
+
if not key or not value:
|
|
239
|
+
raise MetadataFilterFormatError(entry=item)
|
|
240
|
+
metadata_filters[key] = value
|
|
241
|
+
|
|
242
|
+
metadata_sql_query: str | None = None
|
|
243
|
+
if eval_cfg and eval_cfg.metadata_sql:
|
|
244
|
+
metadata_sql_query = eval_cfg.metadata_sql
|
|
245
|
+
else:
|
|
246
|
+
cfg_metadata_sql = cfg.get("metadata_sql")
|
|
247
|
+
if isinstance(cfg_metadata_sql, dict):
|
|
248
|
+
metadata_sql_query = cfg_metadata_sql.get("query") or cfg_metadata_sql.get("sql")
|
|
249
|
+
elif isinstance(cfg_metadata_sql, str):
|
|
250
|
+
metadata_sql_query = cfg_metadata_sql
|
|
251
|
+
|
|
252
|
+
if metadata_sql:
|
|
253
|
+
metadata_sql_query = metadata_sql
|
|
254
|
+
if metadata_sql_query is not None:
|
|
255
|
+
metadata_sql_query = str(metadata_sql_query)
|
|
256
|
+
|
|
257
|
+
trace_db_url: str | None = None
|
|
258
|
+
trace_db = (trace_db or "").strip()
|
|
259
|
+
if trace_db and trace_db.lower() not in {"none", "off", "disable"}:
|
|
260
|
+
if "://" in trace_db:
|
|
261
|
+
trace_db_url = trace_db
|
|
262
|
+
else:
|
|
263
|
+
trace_path = Path(trace_db).expanduser()
|
|
264
|
+
trace_path.parent.mkdir(parents=True, exist_ok=True)
|
|
265
|
+
trace_db_url = f"sqlite+aiosqlite:///{trace_path}"
|
|
266
|
+
trace_tracer: SessionTracer | None = None
|
|
267
|
+
if trace_db_url and session_tracer_cls is not None:
|
|
268
|
+
trace_tracer = cast(SessionTracer, session_tracer_cls(db_url=trace_db_url, auto_save=True))
|
|
269
|
+
|
|
270
|
+
# Determine selection params (CLI takes precedence; TOML only fills unset model/seeds/env)
|
|
271
|
+
if cfg.get("model") and not model:
|
|
272
|
+
model = str(cfg["model"]) # type: ignore[index]
|
|
273
|
+
if cfg.get("seeds") and seeds == "0,1,2,3,4":
|
|
274
|
+
val = cfg["seeds"]
|
|
275
|
+
if isinstance(val, list):
|
|
276
|
+
with contextlib.suppress(Exception):
|
|
277
|
+
seeds = ",".join(str(int(x)) for x in val)
|
|
278
|
+
elif isinstance(val, str):
|
|
279
|
+
seeds = val
|
|
280
|
+
elif isinstance(val, int):
|
|
281
|
+
seeds = str(val)
|
|
282
|
+
if cfg.get("env_file") and not env_file:
|
|
283
|
+
ef = cfg["env_file"]
|
|
284
|
+
if isinstance(ef, str):
|
|
285
|
+
env_file = (ef,) # type: ignore[assignment]
|
|
286
|
+
elif isinstance(ef, list):
|
|
287
|
+
env_file = tuple(str(x) for x in ef) # type: ignore[assignment]
|
|
288
|
+
|
|
289
|
+
choice_for_env: AppChoice | None = None
|
|
290
|
+
entry: TaskAppEntryType | None = None
|
|
291
|
+
if task_app_url is None:
|
|
292
|
+
choice_for_env = select_app_choice(app_id, purpose="eval")
|
|
293
|
+
entry = choice_for_env.ensure_entry()
|
|
294
|
+
|
|
295
|
+
env_paths: list[Path] = []
|
|
296
|
+
if entry is not None:
|
|
297
|
+
original_env_path = choice_for_env.path if choice_for_env is not None else None
|
|
298
|
+
env_paths = determine_env_files(entry, env_file, original_path=original_env_path)
|
|
299
|
+
else:
|
|
300
|
+
if not env_file:
|
|
301
|
+
raise click.ClickException("--env-file is required when using --url")
|
|
302
|
+
for candidate in env_file:
|
|
303
|
+
p = Path(candidate).expanduser()
|
|
304
|
+
if not p.exists():
|
|
305
|
+
raise click.ClickException(f"Env file not found: {p}")
|
|
306
|
+
env_paths.append(p)
|
|
307
|
+
|
|
308
|
+
click.echo("Using env file(s): " + ", ".join(str(p) for p in env_paths))
|
|
309
|
+
load_env_files_into_process([str(Path(p)) for p in env_paths])
|
|
310
|
+
|
|
311
|
+
if task_app_url is None:
|
|
312
|
+
config = entry.config_factory() # type: ignore[union-attr]
|
|
313
|
+
# Help the type checker; runtime check also enforced in server.run_task_app
|
|
314
|
+
if not isinstance(config, task_app_config_type):
|
|
315
|
+
raise click.ClickException(
|
|
316
|
+
"Invalid task app: config_factory did not return TaskAppConfig"
|
|
317
|
+
)
|
|
318
|
+
app = create_task_app(config)
|
|
319
|
+
|
|
320
|
+
# Determine supported models
|
|
321
|
+
inference_meta: dict[str, Any] = {}
|
|
322
|
+
supported: list[str] = []
|
|
323
|
+
seen_models: set[str] = set()
|
|
324
|
+
|
|
325
|
+
def _add_supported_model(candidate: Any) -> None:
|
|
326
|
+
if not candidate:
|
|
327
|
+
return
|
|
328
|
+
text = str(candidate).strip()
|
|
329
|
+
if not text or text in seen_models:
|
|
330
|
+
return
|
|
331
|
+
supported.append(text)
|
|
332
|
+
seen_models.add(text)
|
|
333
|
+
|
|
334
|
+
if task_app_url is None:
|
|
335
|
+
try:
|
|
336
|
+
if hasattr(config, "base_task_info") and config.base_task_info:
|
|
337
|
+
inf_obj = getattr(config.base_task_info, "inference", None)
|
|
338
|
+
if inf_obj is not None:
|
|
339
|
+
if hasattr(inf_obj, "model_dump"):
|
|
340
|
+
inference_meta = dict(inf_obj.model_dump(exclude_none=True)) # type: ignore[attr-defined]
|
|
341
|
+
elif isinstance(inf_obj, dict):
|
|
342
|
+
inference_meta = dict(inf_obj)
|
|
343
|
+
except Exception:
|
|
344
|
+
inference_meta = {}
|
|
345
|
+
else:
|
|
346
|
+
try:
|
|
347
|
+
import httpx as _hx
|
|
348
|
+
|
|
349
|
+
headers = {}
|
|
350
|
+
api_key = (os.environ.get("ENVIRONMENT_API_KEY") or "").strip()
|
|
351
|
+
if api_key:
|
|
352
|
+
headers["X-API-Key"] = api_key
|
|
353
|
+
with _hx.Client(base_url=task_app_url, headers=headers, timeout=15.0) as c:
|
|
354
|
+
info = c.get("/info").json()
|
|
355
|
+
inf = info.get("inference") if isinstance(info, dict) else None
|
|
356
|
+
if isinstance(inf, dict):
|
|
357
|
+
inference_meta = dict(inf)
|
|
358
|
+
except Exception:
|
|
359
|
+
inference_meta = {}
|
|
360
|
+
|
|
361
|
+
default_model = inference_meta.get("model")
|
|
362
|
+
if isinstance(default_model, str):
|
|
363
|
+
_add_supported_model(default_model)
|
|
364
|
+
|
|
365
|
+
models_field = inference_meta.get("models")
|
|
366
|
+
if isinstance(models_field, list):
|
|
367
|
+
for candidate in models_field:
|
|
368
|
+
_add_supported_model(candidate)
|
|
369
|
+
|
|
370
|
+
supported_models = inference_meta.get("supported_models")
|
|
371
|
+
if isinstance(supported_models, list):
|
|
372
|
+
for candidate in supported_models:
|
|
373
|
+
_add_supported_model(candidate)
|
|
374
|
+
|
|
375
|
+
providers = inference_meta.get("providers")
|
|
376
|
+
if isinstance(providers, list):
|
|
377
|
+
if "openai" in providers:
|
|
378
|
+
_add_supported_model("gpt-5")
|
|
379
|
+
if "groq" in providers:
|
|
380
|
+
_add_supported_model("groq:llama-3.1-70b-versatile")
|
|
381
|
+
|
|
382
|
+
_add_supported_model("synth:qwen-0.6b")
|
|
383
|
+
|
|
384
|
+
selected_model = model
|
|
385
|
+
if not selected_model:
|
|
386
|
+
if not supported:
|
|
387
|
+
raise click.ClickException(
|
|
388
|
+
"No supported models; supply --model or add base_task_info.inference.model"
|
|
389
|
+
)
|
|
390
|
+
click.echo("Select model to evaluate:")
|
|
391
|
+
for idx, m in enumerate(supported, start=1):
|
|
392
|
+
click.echo(f" {idx}) {m}")
|
|
393
|
+
choice_idx = click.prompt("Enter choice", type=click.IntRange(1, len(supported)))
|
|
394
|
+
selected_model = supported[choice_idx - 1]
|
|
395
|
+
|
|
396
|
+
try:
|
|
397
|
+
seed_values = [int(s.strip()) for s in seeds.split(",") if s.strip()]
|
|
398
|
+
except Exception as exc:
|
|
399
|
+
raise SeedParseError(value=seeds) from exc
|
|
400
|
+
|
|
401
|
+
import httpx

# Forward ENVIRONMENT_API_KEY (when set) as the X-API-Key header on every
# task-app request.
headers = {}
api_key = (os.environ.get("ENVIRONMENT_API_KEY") or "").strip()
if api_key:
    headers["X-API-Key"] = api_key
|
|
407
|
+
|
|
408
|
+
# Precompute optional policy overrides from TOML
policy_overrides: dict[str, Any] = {}
try:
    # Accept [eval.policy] table or top-level keys for convenience
    if isinstance(cfg.get("policy"), dict):
        policy_overrides.update(dict(cfg["policy"]))
    # Back-compat: allow temperature/max_tokens at top level
    # (top-level keys never override an explicit [eval.policy] entry).
    for k in (
        "temperature",
        "max_tokens",
        "reasoning_effort",
        "system_hint",
        "tool_choice",
        "inference_url",
    ):
        if k in cfg and k not in policy_overrides:
            policy_overrides[k] = cfg.get(k)
except Exception:
    # Overrides are best-effort; a malformed config falls back to none.
    policy_overrides = {}
|
|
427
|
+
|
|
428
|
+
# Resolve the rollout concurrency limit: default 1, coerce to int, floor at 1,
# and never exceed the number of seeds being evaluated.
raw_concurrency = cfg.get("concurrency")
try:
    concurrency_limit = int(raw_concurrency) if raw_concurrency is not None else 1
except Exception:
    concurrency_limit = 1
if concurrency_limit <= 0:
    concurrency_limit = 1
concurrency_limit = min(concurrency_limit, max(1, len(seed_values)))

# Judge specs registered from [eval.judge]/[eval.judges] config (filled below).
judge_specs: list[Any] = []
|
|
438
|
+
|
|
439
|
+
def _register_judge(name_hint: str | None, judge_cfg: dict[str, Any]) -> None:
    """Load a judge callable described by *judge_cfg* and append it to judge_specs.

    The config must name the source exclusively via 'module' (importable
    dotted path) or 'path' (filesystem path loaded with importlib machinery).
    The callable is looked up by 'callable'/'function', defaulting to a
    module-level 'judge'. Remaining config keys become keyword arguments
    passed to the judge on every invocation. Raises click.ClickException on
    any misconfiguration or load failure.
    """
    if not judge_cfg:
        return
    judge_module = judge_cfg.get("module")
    judge_path = judge_cfg.get("path")
    judge_callable_name = judge_cfg.get("callable") or judge_cfg.get("function")
    if judge_module and judge_path:
        raise click.ClickException("Judge config cannot set both 'module' and 'path'")
    if not judge_module and not judge_path:
        raise click.ClickException("Judge config requires 'module' or 'path'")
    try:
        if judge_module:
            module = importlib.import_module(str(judge_module))
        else:
            # Load the module directly from a file path under a synthetic name.
            path = Path(str(judge_path)).expanduser()
            if not path.exists():
                raise click.ClickException(f"Judge module path not found: {path}")
            spec = importlib.util.spec_from_file_location(
                f"_eval_judge_{path.stem}", path
            )
            if not spec or not spec.loader:
                raise click.ClickException(f"Failed to load judge module from {path}")
            module = importlib.util.module_from_spec(spec)
            # Register in sys.modules before exec so intra-module imports resolve.
            sys.modules[spec.name] = module
            spec.loader.exec_module(module)
    except click.ClickException:
        raise
    except Exception as exc:
        raise click.ClickException(f"Unable to load judge module: {exc}") from exc

    if judge_callable_name:
        try:
            judge_fn = getattr(module, str(judge_callable_name))
        except AttributeError as exc:
            raise click.ClickException(
                f"Judge callable '{judge_callable_name}' not found in module"
            ) from exc
    else:
        # No explicit name: fall back to a module-level `judge` attribute.
        if hasattr(module, "judge"):
            judge_fn = module.judge
        else:
            raise click.ClickException("Judge module must expose 'judge' callable")

    if not callable(judge_fn):
        raise click.ClickException("Judge callable is not callable")

    # Everything except the loader/name keys is forwarded as judge kwargs.
    judge_kwargs = {
        k: v
        for k, v in judge_cfg.items()
        if k not in {"module", "path", "callable", "function", "name"}
    }
    # Display name precedence: explicit 'name' > caller hint > positional default.
    display_name = str(
        judge_cfg.get("name")
        or name_hint
        or f"judge{len(judge_specs) + 1}"
    )
    judge_specs.append(judge_spec_cls(display_name, judge_fn, judge_kwargs))
|
|
496
|
+
|
|
497
|
+
# Register judges declared under [eval.judge]. Two shapes are accepted:
# a single judge given directly (loader keys at the top level), or a table
# of named sub-tables, each describing one judge.
raw_judge_cfg = cfg.get("judge")
if isinstance(raw_judge_cfg, dict) and raw_judge_cfg:
    direct_keys = {"module", "path", "callable", "function", "name"}
    has_direct_keys = any(key in raw_judge_cfg for key in direct_keys)
    nested_candidates = [
        (key, value)
        for key, value in raw_judge_cfg.items()
        if isinstance(value, dict)
    ]
    if has_direct_keys and not nested_candidates:
        _register_judge(None, raw_judge_cfg)
    else:
        for sub_name, sub_cfg in nested_candidates:
            _register_judge(sub_name, sub_cfg)

# Also accept a list form under [eval.judges]; non-dict entries are ignored.
raw_judges_list = cfg.get("judges")
if isinstance(raw_judges_list, list):
    for _index, entry in enumerate(raw_judges_list, start=1):
        if isinstance(entry, dict):
            _register_judge(entry.get("name") or f"judge{len(judge_specs) + 1}", entry)
|
|
517
|
+
|
|
518
|
+
# Per-seed result rows collected by _run_seed and summarized after the run.
records: list[dict[str, Any]] = []

# HTTP-level success/failure counts across all rollouts.
successes = 0
failures = 0
# Aggregate outcome stats across successful seeds
outcome_sum: float = 0.0
outcome_count: int = 0
outcome_correct: int = 0
|
|
526
|
+
|
|
527
|
+
def _build_task_rows(taskset: Any) -> dict[int, dict[str, Any]]:
|
|
528
|
+
rows: dict[int, dict[str, Any]] = {}
|
|
529
|
+
if not isinstance(taskset, dict):
|
|
530
|
+
return rows
|
|
531
|
+
|
|
532
|
+
scenario_ids = taskset.get("scenario_ids") or []
|
|
533
|
+
loop_ids = taskset.get("loop_ids") or []
|
|
534
|
+
thread_ids = taskset.get("thread_ids") or []
|
|
535
|
+
difficulty_map = taskset.get("difficulty_map") or {}
|
|
536
|
+
|
|
537
|
+
max_len = max(len(scenario_ids), len(loop_ids), len(thread_ids))
|
|
538
|
+
for seed in range(max_len):
|
|
539
|
+
scenario_id = scenario_ids[seed] if seed < len(scenario_ids) else None
|
|
540
|
+
loop_id = loop_ids[seed] if seed < len(loop_ids) else None
|
|
541
|
+
thread_id = thread_ids[seed] if seed < len(thread_ids) else None
|
|
542
|
+
difficulty = None
|
|
543
|
+
if isinstance(difficulty_map, dict):
|
|
544
|
+
if scenario_id and scenario_id in difficulty_map:
|
|
545
|
+
difficulty = difficulty_map.get(scenario_id)
|
|
546
|
+
elif str(seed) in difficulty_map:
|
|
547
|
+
difficulty = difficulty_map.get(str(seed))
|
|
548
|
+
|
|
549
|
+
rows[seed] = {
|
|
550
|
+
"seed": seed,
|
|
551
|
+
"scenario_id": scenario_id,
|
|
552
|
+
"loop_id": loop_id,
|
|
553
|
+
"thread_id": thread_id,
|
|
554
|
+
"difficulty": difficulty,
|
|
555
|
+
}
|
|
556
|
+
return rows
|
|
557
|
+
|
|
558
|
+
def _apply_metadata_filters(
|
|
559
|
+
rows: dict[int, dict[str, Any]], seeds_list: list[int], filters: dict[str, str]
|
|
560
|
+
) -> list[int]:
|
|
561
|
+
if not filters:
|
|
562
|
+
return seeds_list
|
|
563
|
+
filtered: list[int] = []
|
|
564
|
+
for seed in seeds_list:
|
|
565
|
+
row = rows.get(seed)
|
|
566
|
+
if not row:
|
|
567
|
+
continue
|
|
568
|
+
include = True
|
|
569
|
+
for key, expected in filters.items():
|
|
570
|
+
actual = row.get(key)
|
|
571
|
+
if actual is None:
|
|
572
|
+
include = False
|
|
573
|
+
break
|
|
574
|
+
if str(actual).lower() != expected.lower():
|
|
575
|
+
include = False
|
|
576
|
+
break
|
|
577
|
+
if include:
|
|
578
|
+
filtered.append(seed)
|
|
579
|
+
return filtered
|
|
580
|
+
|
|
581
|
+
def _apply_metadata_sql(
|
|
582
|
+
rows: dict[int, dict[str, Any]], seeds_list: list[int], query: str
|
|
583
|
+
) -> list[int]:
|
|
584
|
+
"""Return seeds that satisfy an arbitrary SQL query.
|
|
585
|
+
|
|
586
|
+
The query is executed against an in-memory SQLite table named `tasks`
|
|
587
|
+
with columns (seed INTEGER, scenario_id TEXT, loop_id TEXT, thread_id TEXT, difficulty TEXT).
|
|
588
|
+
Any rows whose `seed` value (or first column if `seed` is absent) appear in the result set are retained.
|
|
589
|
+
"""
|
|
590
|
+
if not query:
|
|
591
|
+
return seeds_list
|
|
592
|
+
conn = sqlite3.connect(":memory:")
|
|
593
|
+
try:
|
|
594
|
+
cur = conn.cursor()
|
|
595
|
+
cur.execute(
|
|
596
|
+
"CREATE TABLE tasks (seed INTEGER, scenario_id TEXT, loop_id TEXT, thread_id TEXT, difficulty TEXT)"
|
|
597
|
+
)
|
|
598
|
+
insert_stmt = (
|
|
599
|
+
"INSERT INTO tasks (seed, scenario_id, loop_id, thread_id, difficulty) VALUES (?,?,?,?,?)"
|
|
600
|
+
)
|
|
601
|
+
for seed in seeds_list:
|
|
602
|
+
row = rows.get(seed, {})
|
|
603
|
+
cur.execute(
|
|
604
|
+
insert_stmt,
|
|
605
|
+
[
|
|
606
|
+
seed,
|
|
607
|
+
row.get("scenario_id"),
|
|
608
|
+
row.get("loop_id"),
|
|
609
|
+
row.get("thread_id"),
|
|
610
|
+
row.get("difficulty"),
|
|
611
|
+
],
|
|
612
|
+
)
|
|
613
|
+
|
|
614
|
+
result = cur.execute(query)
|
|
615
|
+
fetched = result.fetchall()
|
|
616
|
+
if not fetched:
|
|
617
|
+
return []
|
|
618
|
+
description = result.description or []
|
|
619
|
+
col_names = [col[0] for col in description]
|
|
620
|
+
seeds_out: list[int] = []
|
|
621
|
+
for entry in fetched:
|
|
622
|
+
value = entry[col_names.index("seed")] if "seed" in col_names else entry[0]
|
|
623
|
+
try:
|
|
624
|
+
seeds_out.append(int(value))
|
|
625
|
+
except Exception as exc:
|
|
626
|
+
raise MetadataSQLResultError(
|
|
627
|
+
query=query,
|
|
628
|
+
detail="non-integer value returned",
|
|
629
|
+
) from exc
|
|
630
|
+
seeds_set = set(seeds_out)
|
|
631
|
+
return [seed for seed in seeds_list if seed in seeds_set]
|
|
632
|
+
except sqlite3.Error as exc:
|
|
633
|
+
raise MetadataSQLExecutionError(query=query, detail=str(exc)) from exc
|
|
634
|
+
finally:
|
|
635
|
+
conn.close()
|
|
636
|
+
|
|
637
|
+
async def _run_eval() -> None:
    """Run all rollouts: build the HTTP client, optionally filter seeds by
    task metadata, then execute one rollout per seed (bounded by the
    semaphore) and accumulate results into the enclosing counters/records."""
    nonlocal successes, failures, outcome_sum, outcome_count, outcome_correct, records, seed_values

    # Lazily open the trace store on first use.
    if trace_tracer is not None and trace_tracer.db is None:
        await trace_tracer.initialize()

    # No URL means in-process mode: talk to the local ASGI app directly.
    if task_app_url is None:
        transport = httpx.ASGITransport(app=app)  # type: ignore[name-defined]
        async_client = httpx.AsyncClient(
            transport=cast(Any, transport),
            base_url="http://eval.local",
            timeout=300.0,
            follow_redirects=True,
            headers=headers,
        )
    else:
        async_client = httpx.AsyncClient(
            base_url=task_app_url,
            timeout=300.0,
            follow_redirects=True,
            headers=headers,
        )

    try:
        # Best-effort fetch of /task_info; tolerate both the wrapped
        # {"taskset": {...}} shape and a bare taskset dict.
        taskset_payload: dict[str, Any] | None = None
        try:
            task_info_response = await async_client.get("/task_info")
        except Exception:
            task_info_response = None
        if task_info_response is not None and task_info_response.status_code == 200:
            with contextlib.suppress(Exception):
                payload_json = task_info_response.json()
                if isinstance(payload_json, dict) and "taskset" in payload_json:
                    taskset_payload = payload_json.get("taskset")
                    if not isinstance(taskset_payload, dict):
                        taskset_payload = None
                elif isinstance(payload_json, dict):
                    taskset_payload = payload_json

        # Apply SQL filter first, then key=value filters, to the seed list.
        available_seeds = list(seed_values)
        if metadata_sql_query or metadata_filters:
            if not taskset_payload:
                raise TaskInfoUnavailableError()
            rows = _build_task_rows(taskset_payload)
            if metadata_sql_query:
                available_seeds = _apply_metadata_sql(rows, available_seeds, metadata_sql_query)
            if metadata_filters:
                available_seeds = _apply_metadata_filters(rows, available_seeds, metadata_filters)
            if not available_seeds:
                raise NoSeedsMatchedError()
            seed_values = available_seeds

        semaphore = asyncio.Semaphore(concurrency_limit)

        async def _run_seed(seed_val: int) -> None:
            """Execute one /rollout request for *seed_val*, parse the trace
            and metrics from the response, run judges, persist the trace,
            and append a result record."""
            nonlocal successes, failures, outcome_sum, outcome_count, outcome_correct, records
            # Read env_name and policy_name from config if available
            env_name = cfg.get("env_name") or (cfg.get("env", {}).get("env_name") if isinstance(cfg.get("env"), dict) else None)
            policy_name = cfg.get("policy_name") or (cfg.get("policy", {}).get("policy_name") if isinstance(cfg.get("policy"), dict) else None)
            env_config_overrides = cfg.get("env_config", {}) if isinstance(cfg.get("env_config"), dict) else {}
            policy_config_overrides = cfg.get("policy_config", {}) if isinstance(cfg.get("policy_config"), dict) else {}

            # Debug: print config parsing
            if seed_val == 0:
                click.echo(f"[DEBUG] env_name from config: {env_name}")
                click.echo(f"[DEBUG] policy_name from config: {policy_name}")

            # Generate default ops sequence if not provided
            max_llm_calls = policy_config_overrides.get("max_llm_calls", 10)
            ops_list = cfg.get("ops", [])
            if not ops_list:
                # Generate default "agent, env" pairs for max_llm_calls
                ops_list = ["agent", "env"] * int(max_llm_calls)

            # Rollout request body; config overrides are merged last so they win.
            body = {
                "run_id": str(uuid.uuid4()),
                "env": {"config": {"split": split, "index": seed_val, **env_config_overrides}, "seed": seed_val},
                "policy": {
                    "policy_name": policy_name or selected_model,
                    "config": {"model": selected_model, **policy_overrides, **policy_config_overrides},
                },
                "ops": ops_list,
                "record": {
                    "return_trace": cfg.get("return_trace", True),
                    "trace_format": cfg.get("trace_format", "structured"),
                },
                "mode": "eval",  # RolloutMode.EVAL: use inference URLs as-is, no transformations
            }
            if env_name:
                env_section = body.get("env")
                if isinstance(env_section, dict):
                    env_section["env_name"] = env_name
                else:
                    body["env"] = {"env_name": env_name}

            # Debug: print the body being sent
            if seed_val == 0:
                click.echo(f"[DEBUG] rollout body env: {body['env']}")
                click.echo(f"[DEBUG] rollout body policy: {body['policy']}")
                click.echo(f"[DEBUG] rollout body mode: {body.get('mode', 'NOT SET')}")
            rollout_elapsed: float | None = None
            rollout_start = time.perf_counter()
            try:
                import logging
                _log = logging.getLogger(__name__)
                _log.info(f"[EVAL_BODY_DEBUG] Sending body with mode={body.get('mode')}")
                # Semaphore bounds concurrent in-flight rollouts.
                async with semaphore:
                    response = await async_client.post("/rollout", json=body)
                rollout_elapsed = time.perf_counter() - rollout_start
            except Exception as exc:
                failures += 1
                click.echo(f"seed={seed_val} error={exc}")
                return

            ok = 200 <= response.status_code < 300
            if ok:
                successes += 1
            else:
                failures += 1

            summary = [f"seed={seed_val}", f"status={response.status_code}"]
            data: Any
            try:
                data = response.json()
            except Exception:
                data = None

            # Debug: print validation errors
            if response.status_code == 422 and data:
                click.echo(f"[DEBUG] 422 Validation Error: {data}")

            # Fields extracted from the response below; None until found.
            metrics: dict[str, Any] | None = None
            completion: str | None = None
            prompt_index: int | None = None
            prompt_text: str | None = None
            task_id: str | None = None
            task_split: str | None = None
            task_rubric_id: str | None = None

            trace_namespace: dict[str, Any] | None = None
            session_trace_dict: dict[str, Any] | None = None

            if isinstance(data, dict):
                import logging
                _logger = logging.getLogger(__name__)
                _logger.info(f"[EVAL_DEBUG] Response data keys: {list(data.keys())}")
                if "detail" in data:
                    _logger.error(f"[EVAL_DEBUG] Task app returned error: {data['detail']}")
                trace_namespace = data.get("trace")
                _logger.info(f"[EVAL_DEBUG] trace_namespace type: {type(trace_namespace)}, value: {trace_namespace if not isinstance(trace_namespace, dict) else 'dict with keys: ' + str(list(trace_namespace.keys()) if trace_namespace else 'None')}")
                # A trace dict is mandatory for eval; fail loudly otherwise.
                if not isinstance(trace_namespace, dict):
                    raise RuntimeError(
                        "The 'synth-ai eval' command requires trace payloads in rollout responses. "
                        "Ensure the rollout request includes 'trace_format': 'structured' and 'return_trace': true, "
                        "and that task app tracing is enabled (TASKAPP_TRACING_ENABLED=1). "
                        "Note: This is specific to the eval command - general rollout endpoints don't require traces."
                    )
                # Handle both "compact" and "full" trace formats:
                # - compact: trace_namespace contains {session_id, metadata, ...}
                # - full: trace_namespace IS the full session_trace dict
                session_trace_dict = trace_namespace.get("session_trace")
                if not isinstance(session_trace_dict, dict):
                    # If no session_trace key, assume "full" format where trace itself is the session_trace
                    if "session_id" in trace_namespace:
                        session_trace_dict = trace_namespace
                    else:
                        raise RuntimeError(
                            "The 'synth-ai eval' command requires 'session_trace' in the trace payload or a valid full trace format. "
                            "Ensure the task app is using tracing_v3 and returning structured trace data."
                        )
                metrics = data.get("metrics") if isinstance(data.get("metrics"), dict) else None
                if metrics:
                    mean_return = metrics.get("mean_return") or metrics.get("total_reward")
                    outcome = metrics.get("outcome_score")
                    if mean_return is not None:
                        summary.append(f"mean_return={mean_return}")
                    if outcome is not None:
                        summary.append(f"outcome={outcome}")
                        try:
                            # Outcomes >= 0.5 count as "correct" in the summary.
                            val = float(outcome)
                            outcome_sum += val
                            outcome_count += 1
                            if val >= 0.5:
                                outcome_correct += 1
                        except Exception:
                            pass
                trajs = (
                    data.get("trajectories")
                    if isinstance(data.get("trajectories"), list)
                    else None
                )
                if trajs:
                    # Mine the first trajectory for prompt/task identifiers;
                    # task fields are taken from the first place they appear
                    # (first step obs, then final observation, then final info).
                    first = trajs[0] if trajs else None
                    steps = first.get("steps") if isinstance(first, dict) else None
                    if isinstance(steps, list) and steps:
                        step0 = steps[0]
                        tool_calls = step0.get("tool_calls") or step0.get("tools") or []
                        if isinstance(tool_calls, list):
                            summary.append(f"tool_calls={len(tool_calls)}")
                        obs = step0.get("obs") if isinstance(step0, dict) else None
                        if isinstance(obs, dict):
                            idx_val = obs.get("prompt_index")
                            if isinstance(idx_val, int):
                                prompt_index = idx_val
                            prompt_raw = obs.get("prompt")
                            if isinstance(prompt_raw, str):
                                prompt_text = prompt_raw
                            if task_id is None:
                                candidate_id = obs.get("task_id")
                                if isinstance(candidate_id, str) and candidate_id:
                                    task_id = candidate_id
                            if task_split is None:
                                candidate_split = obs.get("task_split")
                                if isinstance(candidate_split, str) and candidate_split:
                                    task_split = candidate_split
                            if task_rubric_id is None:
                                candidate_rid = obs.get("task_rubric_id")
                                if isinstance(candidate_rid, str) and candidate_rid:
                                    task_rubric_id = candidate_rid
                    final = first.get("final") if isinstance(first, dict) else None
                    if isinstance(final, dict):
                        final_obs = final.get("observation")
                        if isinstance(final_obs, dict):
                            comp_val = final_obs.get("completion")
                            if isinstance(comp_val, str):
                                completion = comp_val
                            if task_id is None:
                                candidate_id = final_obs.get("task_id")
                                if isinstance(candidate_id, str) and candidate_id:
                                    task_id = candidate_id
                            if task_split is None:
                                candidate_split = final_obs.get("task_split")
                                if isinstance(candidate_split, str) and candidate_split:
                                    task_split = candidate_split
                            if task_rubric_id is None:
                                candidate_rid = final_obs.get("task_rubric_id")
                                if isinstance(candidate_rid, str) and candidate_rid:
                                    task_rubric_id = candidate_rid
                        final_info = final.get("info")
                        if isinstance(final_info, dict):
                            if task_id is None:
                                candidate_id = final_info.get("task_id")
                                if isinstance(candidate_id, str) and candidate_id:
                                    task_id = candidate_id
                            if task_split is None:
                                candidate_split = final_info.get("task_split")
                                if isinstance(candidate_split, str) and candidate_split:
                                    task_split = candidate_split
                            if task_rubric_id is None:
                                candidate_rid = final_info.get("task_rubric_id")
                                if isinstance(candidate_rid, str) and candidate_rid:
                                    task_rubric_id = candidate_rid
                if task_id:
                    summary.append(f"task_id={task_id}")
                click.echo(" ".join(summary))
                with contextlib.suppress(Exception):
                    click.echo(json.dumps(data, indent=2))
            else:
                click.echo(" ".join(summary))

            # Official score: first numeric metric in priority order, falling
            # back to the first step's reward.
            official_score = None
            if isinstance(metrics, dict):
                for key in ("mean_return", "total_reward", "outcome_score"):
                    val = metrics.get(key)
                    if isinstance(val, int | float):
                        official_score = float(val)
                        break
            if official_score is None and isinstance(data, dict):
                try:
                    reward_val = data["trajectories"][0]["steps"][0].get("reward")
                    if isinstance(reward_val, int | float):
                        official_score = float(reward_val)
                except Exception:
                    pass

            # Clamp the score into [0, 1].
            if official_score is not None:
                if official_score < 0.0:
                    official_score = 0.0
                elif official_score > 1.0:
                    official_score = min(1.0, official_score)

            judge_scores: dict[str, float | None] = {}
            judges_timings: dict[str, float | None] = {}
            timings: dict[str, Any] = {
                "rollout_s": rollout_elapsed,
                "judges": judges_timings,
            }
            if judge_specs:
                for spec in judge_specs:
                    score_value: float | None = None
                    judge_elapsed: float | None = None
                    # Run judges for all tasks (text-based and trajectory-based)
                    # Text-based tasks have completion, trajectory-based tasks use response
                    judge_payload = {
                        "seed": seed_val,
                        "prompt_index": prompt_index,
                        "prompt": prompt_text,
                        "completion": completion,
                        "metrics": metrics,
                        "response": data,
                        "trace": trace_namespace,
                    }
                    try:
                        judge_start = time.perf_counter()
                        result = spec.fn(judge_payload, **spec.kwargs)
                        judge_elapsed = time.perf_counter() - judge_start
                        if isinstance(result, int | float):
                            score_value = float(result)
                    except Exception as exc:
                        # A failing judge contributes a None score but never
                        # aborts the seed.
                        if judge_elapsed is None:
                            judge_elapsed = time.perf_counter() - judge_start
                        click.echo(f"seed={seed_val} judge[{spec.name}]_error={exc}")
                    judges_timings[spec.name] = judge_elapsed
                    judge_scores[spec.name] = score_value

            # Persist the trace with eval metadata when tracing is enabled.
            if trace_tracer is not None and trace_namespace:
                storage_metadata = {
                    "eval_seed": seed_val,
                    "prompt_index": prompt_index,
                    "task_id": task_id,
                    "task_split": task_split,
                    "task_rubric_id": task_rubric_id,
                    "official_score": official_score,
                    "judge_scores": judge_scores,
                    "model": selected_model,
                    "prompt": prompt_text,
                    "completion": completion,
                }
                if store_trace is not None:
                    await store_trace(trace_tracer, trace_namespace, storage_metadata)

            records.append(
                {
                    "seed": seed_val,
                    "prompt_index": prompt_index,
                    "task_id": task_id,
                    "task_split": task_split,
                    "task_rubric_id": task_rubric_id,
                    "official_score": official_score,
                    "judge_scores": judge_scores,
                    "timings": timings,
                }
            )

        # Launch every seed concurrently; per-request parallelism is limited
        # inside _run_seed by the semaphore.
        await asyncio.gather(*[_run_seed(seed_val) for seed_val in seed_values])
    finally:
        await async_client.aclose()
|
|
984
|
+
|
|
985
|
+
try:
    asyncio.run(_run_eval())
finally:
    # NOTE(review): this second asyncio.run creates a fresh event loop to close
    # the trace DB — presumably trace_tracer.db.close() is loop-agnostic; confirm.
    if trace_tracer is not None and trace_tracer.db is not None:
        asyncio.run(trace_tracer.db.close())

click.echo(
    f"Eval complete: {successes} ok, {failures} failed; model={selected_model}, split={split}"
)

# Print aggregate outcome stats when at least one seed produced a numeric
# outcome_score.
if outcome_count > 0:
    mean_outcome = outcome_sum / float(outcome_count)
    frac_right = outcome_correct / float(outcome_count)
    click.echo(
        f"Outcome summary: correct={outcome_correct}/{outcome_count} ({frac_right:.2%}), mean_outcome={mean_outcome:.3f}"
    )
|
|
1001
|
+
|
|
1002
|
+
# Final report: per-judge means, official/judge score correlation, and an
# aligned text table of every per-seed record.
if records:
    judge_specs = judge_specs or []  # ensure iterable
    official_scores = [
        r["official_score"] for r in records if r["official_score"] is not None
    ]
    if official_scores:
        click.echo(f" Official mean: {sum(official_scores) / len(official_scores):.3f}")
    else:
        click.echo(" Official mean: n/a")

    for spec in judge_specs:
        # Mean over the records where this judge produced a score.
        spec_scores = [
            record["judge_scores"].get(spec.name)
            for record in records
            if record["judge_scores"].get(spec.name) is not None
        ]
        if spec_scores:
            mean_spec = sum(spec_scores) / len(spec_scores)
            click.echo(f" [{spec.name}] mean: {mean_spec:.3f}")
        else:
            click.echo(f" [{spec.name}] mean: n/a")

        # Pearson correlation between official scores and this judge's scores,
        # over records where both are present.
        paired = [
            (
                record["official_score"],
                record["judge_scores"].get(spec.name),
            )
            for record in records
            if record["official_score"] is not None
            and record["judge_scores"].get(spec.name) is not None
        ]
        if len(paired) >= 2:
            corr = pearson(
                [p[0] for p in paired if p[0] is not None],
                [p[1] for p in paired if p[1] is not None],
            )
            if corr is not None:
                click.echo(f" Pearson r: {corr:.3f}")
            else:
                click.echo(" Pearson r: undefined (zero variance)")
        else:
            click.echo(" Pearson r: n/a (need ≥2 paired scores)")

    # Build the table: one row per record, sorted by (seed, prompt_index),
    # with "-" standing in for missing values.
    header = ["Seed", "Prompt", "Official"]
    header.extend(spec.name for spec in judge_specs)
    rows: list[list[str]] = []
    for record in sorted(records, key=lambda r: (r["seed"], r.get("prompt_index") or -1)):
        seed_val = str(record["seed"])
        prompt_idx = (
            str(record["prompt_index"])
            if record["prompt_index"] is not None
            else "-"
        )
        official_val = (
            f"{record['official_score']:.3f}"
            if record["official_score"] is not None
            else "-"
        )
        row = [seed_val, prompt_idx, official_val]
        for spec in judge_specs:
            score_val = record["judge_scores"].get(spec.name)
            row.append(f"{score_val:.3f}" if isinstance(score_val, int | float) else "-")
        rows.append(row)

    # Column widths sized to the widest cell (including the header).
    widths = [len(col) for col in header]
    for row in rows:
        for idx, cell in enumerate(row):
            widths[idx] = max(widths[idx], len(cell))

    click.echo("")
    click.echo(" ".join(h.ljust(widths[idx]) for idx, h in enumerate(header)))
    click.echo(" ".join("-" * widths[idx] for idx in range(len(header))))
    for row in rows:
        click.echo(" ".join(cell.ljust(widths[idx]) for idx, cell in enumerate(row)))
|
|
1076
|
+
|
|
1077
|
+
|
|
1078
|
+
|
|
1079
|
+
# Generic alias returned by get_command() below.
command = eval_command
|
|
1080
|
+
|
|
1081
|
+
|
|
1082
|
+
def get_command() -> click.Command:
    """Expose the task-app evaluation command for CLI registration.

    Returns the module-level ``command`` alias (bound to ``eval_command``).
    """
    return command
|
|
1085
|
+
|
|
1086
|
+
|
|
1087
|
+
def format_eval_error(err: EvalCliError) -> str:
    """Render an EvalCliError subclass as a human-readable CLI message.

    Types are checked in declaration order (first isinstance match wins);
    anything unrecognized falls back to ``str(err)``.
    """
    # Ordered (type, formatter) dispatch; order is preserved from the original
    # if/elif chain so subclass relationships resolve identically.
    formatters = [
        (
            TomlUnavailableError,
            lambda e: f"TOML parser not available. {e.hint or 'Install tomli or use Python 3.11+.'}",
        ),
        (EvalConfigNotFoundError, lambda e: f"Eval config not found: {e.path}"),
        (EvalConfigParseError, lambda e: f"Failed to parse TOML '{e.path}': {e.detail}"),
        (MissingEvalTableError, lambda e: "Config must contain an [eval] table."),
        (InvalidEvalConfigError, lambda e: f"Invalid eval config: {e.detail}"),
        (
            SeedParseError,
            lambda e: f"Unable to parse seeds from '{e.value}'. Provide comma-separated integers.",
        ),
        (
            MetadataFilterFormatError,
            lambda e: f"Metadata filter '{e.entry}' must be key=value.",
        ),
        (
            TaskInfoUnavailableError,
            lambda e: "Task metadata filters require the task app to expose /task_info metadata.",
        ),
        (
            NoSeedsMatchedError,
            lambda e: f"No seeds match the provided metadata filters. {e.hint or 'Adjust the metadata filters or seed list.'}",
        ),
        (
            MetadataSQLExecutionError,
            lambda e: f"Failed to execute metadata SQL query '{e.query}': {e.detail}",
        ),
        (
            MetadataSQLResultError,
            lambda e: f"metadata SQL query '{e.query}' must return integer seed values ({e.detail})",
        ),
    ]
    for err_type, render in formatters:
        if isinstance(err, err_type):
            return render(err)
    return str(err)
|