synth-ai 0.4.1__py3-none-any.whl → 0.4.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of synth-ai might be problematic.
- synth_ai/__init__.py +13 -13
- synth_ai/cli/__init__.py +6 -15
- synth_ai/cli/commands/eval/__init__.py +6 -15
- synth_ai/cli/commands/eval/config.py +338 -0
- synth_ai/cli/commands/eval/core.py +236 -1091
- synth_ai/cli/commands/eval/runner.py +704 -0
- synth_ai/cli/commands/eval/validation.py +44 -117
- synth_ai/cli/commands/filter/core.py +7 -7
- synth_ai/cli/commands/filter/validation.py +2 -2
- synth_ai/cli/commands/smoke/core.py +7 -17
- synth_ai/cli/commands/status/__init__.py +1 -64
- synth_ai/cli/commands/status/client.py +50 -151
- synth_ai/cli/commands/status/config.py +3 -83
- synth_ai/cli/commands/status/errors.py +4 -13
- synth_ai/cli/commands/status/subcommands/__init__.py +2 -8
- synth_ai/cli/commands/status/subcommands/config.py +13 -0
- synth_ai/cli/commands/status/subcommands/files.py +18 -63
- synth_ai/cli/commands/status/subcommands/jobs.py +28 -311
- synth_ai/cli/commands/status/subcommands/models.py +18 -62
- synth_ai/cli/commands/status/subcommands/runs.py +16 -63
- synth_ai/cli/commands/status/subcommands/session.py +67 -172
- synth_ai/cli/commands/status/subcommands/summary.py +24 -32
- synth_ai/cli/commands/status/subcommands/utils.py +41 -0
- synth_ai/cli/commands/status/utils.py +16 -107
- synth_ai/cli/commands/train/__init__.py +18 -20
- synth_ai/cli/commands/train/errors.py +3 -3
- synth_ai/cli/commands/train/prompt_learning_validation.py +15 -16
- synth_ai/cli/commands/train/validation.py +7 -7
- synth_ai/cli/commands/train/{judge_schemas.py → verifier_schemas.py} +33 -34
- synth_ai/cli/commands/train/verifier_validation.py +235 -0
- synth_ai/cli/demo_apps/demo_task_apps/math/config.toml +0 -1
- synth_ai/cli/demo_apps/demo_task_apps/math/modal_task_app.py +2 -6
- synth_ai/cli/demo_apps/math/config.toml +0 -1
- synth_ai/cli/demo_apps/math/modal_task_app.py +2 -6
- synth_ai/cli/demo_apps/mipro/task_app.py +25 -47
- synth_ai/cli/lib/apps/task_app.py +12 -13
- synth_ai/cli/lib/task_app_discovery.py +6 -6
- synth_ai/cli/lib/train_cfgs.py +10 -10
- synth_ai/cli/task_apps/__init__.py +11 -0
- synth_ai/cli/task_apps/commands.py +7 -15
- synth_ai/core/env.py +12 -1
- synth_ai/core/errors.py +1 -2
- synth_ai/core/integrations/cloudflare.py +209 -33
- synth_ai/core/tracing_v3/abstractions.py +46 -0
- synth_ai/data/__init__.py +3 -30
- synth_ai/data/enums.py +1 -20
- synth_ai/data/rewards.py +100 -3
- synth_ai/products/graph_evolve/__init__.py +1 -2
- synth_ai/products/graph_evolve/config.py +16 -16
- synth_ai/products/graph_evolve/converters/__init__.py +3 -3
- synth_ai/products/graph_evolve/converters/openai_sft.py +7 -7
- synth_ai/products/graph_evolve/examples/hotpotqa/config.toml +1 -1
- synth_ai/products/graph_gepa/__init__.py +23 -0
- synth_ai/products/graph_gepa/converters/__init__.py +19 -0
- synth_ai/products/graph_gepa/converters/openai_sft.py +29 -0
- synth_ai/sdk/__init__.py +45 -35
- synth_ai/sdk/api/eval/__init__.py +33 -0
- synth_ai/sdk/api/eval/job.py +732 -0
- synth_ai/sdk/api/research_agent/__init__.py +276 -66
- synth_ai/sdk/api/train/builders.py +181 -0
- synth_ai/sdk/api/train/cli.py +41 -33
- synth_ai/sdk/api/train/configs/__init__.py +6 -4
- synth_ai/sdk/api/train/configs/prompt_learning.py +127 -33
- synth_ai/sdk/api/train/configs/rl.py +264 -16
- synth_ai/sdk/api/train/configs/sft.py +165 -1
- synth_ai/sdk/api/train/graph_validators.py +12 -12
- synth_ai/sdk/api/train/graphgen.py +169 -51
- synth_ai/sdk/api/train/graphgen_models.py +95 -45
- synth_ai/sdk/api/train/local_api.py +10 -0
- synth_ai/sdk/api/train/pollers.py +36 -0
- synth_ai/sdk/api/train/prompt_learning.py +390 -60
- synth_ai/sdk/api/train/rl.py +41 -5
- synth_ai/sdk/api/train/sft.py +2 -0
- synth_ai/sdk/api/train/task_app.py +20 -0
- synth_ai/sdk/api/train/validators.py +17 -17
- synth_ai/sdk/graphs/completions.py +239 -33
- synth_ai/sdk/{judging/schemas.py → graphs/verifier_schemas.py} +23 -23
- synth_ai/sdk/learning/__init__.py +35 -5
- synth_ai/sdk/learning/context_learning_client.py +531 -0
- synth_ai/sdk/learning/context_learning_types.py +294 -0
- synth_ai/sdk/learning/prompt_learning_client.py +1 -1
- synth_ai/sdk/learning/prompt_learning_types.py +2 -1
- synth_ai/sdk/learning/rl/__init__.py +0 -4
- synth_ai/sdk/learning/rl/contracts.py +0 -4
- synth_ai/sdk/localapi/__init__.py +40 -0
- synth_ai/sdk/localapi/apps/__init__.py +28 -0
- synth_ai/sdk/localapi/client.py +10 -0
- synth_ai/sdk/localapi/contracts.py +10 -0
- synth_ai/sdk/localapi/helpers.py +519 -0
- synth_ai/sdk/localapi/rollouts.py +93 -0
- synth_ai/sdk/localapi/server.py +29 -0
- synth_ai/sdk/localapi/template.py +49 -0
- synth_ai/sdk/streaming/handlers.py +6 -6
- synth_ai/sdk/streaming/streamer.py +10 -6
- synth_ai/sdk/task/__init__.py +18 -5
- synth_ai/sdk/task/apps/__init__.py +37 -1
- synth_ai/sdk/task/client.py +9 -1
- synth_ai/sdk/task/config.py +6 -11
- synth_ai/sdk/task/contracts.py +137 -95
- synth_ai/sdk/task/in_process.py +32 -22
- synth_ai/sdk/task/in_process_runner.py +9 -4
- synth_ai/sdk/task/rubrics/__init__.py +2 -3
- synth_ai/sdk/task/rubrics/loaders.py +4 -4
- synth_ai/sdk/task/rubrics/strict.py +3 -4
- synth_ai/sdk/task/server.py +76 -16
- synth_ai/sdk/task/trace_correlation_helpers.py +190 -139
- synth_ai/sdk/task/validators.py +34 -49
- synth_ai/sdk/training/__init__.py +7 -16
- synth_ai/sdk/tunnels/__init__.py +118 -0
- synth_ai/sdk/tunnels/cleanup.py +83 -0
- synth_ai/sdk/tunnels/ports.py +120 -0
- synth_ai/sdk/tunnels/tunneled_api.py +363 -0
- {synth_ai-0.4.1.dist-info → synth_ai-0.4.4.dist-info}/METADATA +71 -4
- {synth_ai-0.4.1.dist-info → synth_ai-0.4.4.dist-info}/RECORD +118 -128
- synth_ai/cli/commands/baseline/__init__.py +0 -12
- synth_ai/cli/commands/baseline/core.py +0 -636
- synth_ai/cli/commands/baseline/list.py +0 -94
- synth_ai/cli/commands/eval/errors.py +0 -81
- synth_ai/cli/commands/status/formatters.py +0 -164
- synth_ai/cli/commands/status/subcommands/pricing.py +0 -23
- synth_ai/cli/commands/status/subcommands/usage.py +0 -203
- synth_ai/cli/commands/train/judge_validation.py +0 -305
- synth_ai/cli/usage.py +0 -159
- synth_ai/data/specs.py +0 -36
- synth_ai/sdk/api/research_agent/cli.py +0 -428
- synth_ai/sdk/api/research_agent/config.py +0 -357
- synth_ai/sdk/api/research_agent/job.py +0 -717
- synth_ai/sdk/baseline/__init__.py +0 -25
- synth_ai/sdk/baseline/config.py +0 -209
- synth_ai/sdk/baseline/discovery.py +0 -216
- synth_ai/sdk/baseline/execution.py +0 -154
- synth_ai/sdk/judging/__init__.py +0 -15
- synth_ai/sdk/judging/base.py +0 -24
- synth_ai/sdk/judging/client.py +0 -191
- synth_ai/sdk/judging/types.py +0 -42
- synth_ai/sdk/research_agent/__init__.py +0 -34
- synth_ai/sdk/research_agent/container_builder.py +0 -328
- synth_ai/sdk/research_agent/container_spec.py +0 -198
- synth_ai/sdk/research_agent/defaults.py +0 -34
- synth_ai/sdk/research_agent/results_collector.py +0 -69
- synth_ai/sdk/specs/__init__.py +0 -46
- synth_ai/sdk/specs/dataclasses.py +0 -149
- synth_ai/sdk/specs/loader.py +0 -144
- synth_ai/sdk/specs/serializer.py +0 -199
- synth_ai/sdk/specs/validation.py +0 -250
- synth_ai/sdk/tracing/__init__.py +0 -39
- synth_ai/sdk/usage/__init__.py +0 -37
- synth_ai/sdk/usage/client.py +0 -171
- synth_ai/sdk/usage/models.py +0 -261
- {synth_ai-0.4.1.dist-info → synth_ai-0.4.4.dist-info}/WHEEL +0 -0
- {synth_ai-0.4.1.dist-info → synth_ai-0.4.4.dist-info}/entry_points.txt +0 -0
- {synth_ai-0.4.1.dist-info → synth_ai-0.4.4.dist-info}/licenses/LICENSE +0 -0
- {synth_ai-0.4.1.dist-info → synth_ai-0.4.4.dist-info}/top_level.txt +0 -0
synth_ai/cli/commands/eval/core.py
@@ -1,1113 +1,258 @@
+"""Eval command CLI entry point for task app rollouts.
+
+**Status:** Alpha
+
+This module provides the Click command-line interface for the `synth-ai eval` command.
+
+**Command Overview:**
+The eval command executes rollouts against a task app and summarizes results.
+It supports two execution modes:
+
+1. **Direct Mode**: Calls task app directly (no backend required)
+2. **Backend Mode**: Routes through backend for trace capture and cost tracking
+
+**Usage:**
+```bash
+# Basic usage with config file
+python -m synth_ai.cli eval \
+    --config banking77_eval.toml \
+    --url http://localhost:8103
+
+# With backend for trace capture
+python -m synth_ai.cli eval \
+    --config banking77_eval.toml \
+    --url http://localhost:8103 \
+    --backend http://localhost:8000
+
+# Override seeds from command line
+python -m synth_ai.cli eval \
+    --config banking77_eval.toml \
+    --url http://localhost:8103 \
+    --seeds 0,1,2,3,4
+```
+
+**Configuration:**
+Configuration can come from:
+- TOML config file (`--config`)
+- Command-line arguments (override config)
+- Environment variables (for API keys, etc.)
+
+Config file format:
+```toml
+[eval]
+app_id = "banking77"
+url = "http://localhost:8103"
+env_name = "banking77"
+seeds = [0, 1, 2, 3, 4]
+
+[eval.policy_config]
+model = "gpt-4"
+provider = "openai"
+```
+
+**Output:**
+- Prints results table to stdout
+- Optionally writes report to `--output-txt`
+- Optionally writes JSON to `--output-json`
+- Optionally saves traces to `--traces-dir`
+
+**See Also:**
+- `synth_ai.cli.commands.eval.runner`: Evaluation execution logic
+- `synth_ai.cli.commands.eval.config`: Configuration loading
+- `monorepo/docs/cli/eval.mdx`: Full CLI documentation
+"""
+
 from __future__ import annotations
 
 import asyncio
-import contextlib
-import importlib
-import importlib.util
 import json
-import os
-import sqlite3
-import sys
-import time
-import uuid
-from collections.abc import Sequence
-from functools import lru_cache
+from dataclasses import asdict
 from pathlib import Path
-from typing import TYPE_CHECKING, Any, cast
 
 import click
+from dotenv import load_dotenv
 
-from synth_ai.cli.lib.task_app_discovery import discover_eval_config_paths
-from synth_ai.core.tracing_v3.session_tracer import SessionTracer
-from synth_ai.sdk.task.config import EvalConfig
-
-from .errors import (
-    EvalCliError,
-    EvalConfigNotFoundError,
-    EvalConfigParseError,
-    InvalidEvalConfigError,
-    MetadataFilterFormatError,
-    MetadataSQLExecutionError,
-    MetadataSQLResultError,
-    MissingEvalTableError,
-    NoSeedsMatchedError,
-    SeedParseError,
-    TaskInfoUnavailableError,
-    TomlUnavailableError,
-)
 from .validation import validate_eval_options
 
-try:  # Python 3.11+
-    import tomllib as _toml
-except Exception:  # pragma: no cover - fallback
-    _toml = None  # type: ignore[assignment]
-
-__all__ = ["command", "get_command", "format_eval_error"]
-
-if TYPE_CHECKING:
-    from synth_ai.cli.task_apps import AppChoice, TaskAppEntryType
-
-
-@lru_cache(maxsize=1)
-def _task_apps_module():
-    from synth_ai.cli import task_apps as module  # local import to avoid circular deps
-
-    return module
 
-
-@click.
-
-
-)
-@click.
-@click.option(
-
-
-
-
-)
-@click.option(
-
-
-
-
-
-)
-@click.option(
-    "--seeds",
-    default="0,1,2,3,4",
-    help="Comma-separated seeds/indices to evaluate. Use negative numbers to wrap around the dataset.",
-)
-@click.option("--split", default="train", show_default=True, help="Dataset split to use")
-@click.option(
-    "--model",
-    default=None,
-    help="Model identifier. When omitted the CLI will prompt based on task metadata.",
-)
-@click.option(
-    "--env-file",
-    multiple=True,
-    type=click.Path(),
-    help="Env file(s) to load (API keys, etc.). Required when using --url or remote judges.",
-)
-@click.option(
-    "--trace-db",
-    default="traces/v3/synth_ai.db",
-    show_default=True,
-    help="SQLite/Turso URL for storing rollout traces set to 'none' to disable persistence.",
-)
-@click.option(
-    "--metadata",
-    multiple=True,
-    help="Filter tasks by key=value metadata (e.g., --metadata difficulty=easy)",
-)
-@click.option(
-    "--metadata-sql",
-    default=None,
-    help="SQLite query that returns seeds to evaluate (e.g., SELECT seed FROM tasks WHERE difficulty='easy' LIMIT 5)",
-)
+@click.command()
+@click.argument("app_id", required=False)
+@click.option("--model", required=False, default="")
+@click.option("--config", "config_path", required=False, default="")
+@click.option("--trace-db", required=False, default="")
+@click.option("--metadata", multiple=True)
+@click.option("--seeds", required=False, default="")
+@click.option("--url", required=False, default="")
+@click.option("--backend", required=False, default="")
+@click.option("--env-file", required=False, default="")
+@click.option("--ops", required=False, default="")
+@click.option("--return-trace", is_flag=True, default=False)
+@click.option("--concurrency", required=False, default="")
+@click.option("--seed-set", type=click.Choice(["seeds", "validation_seeds", "test_pool"]), default="seeds")
+@click.option("--wait", is_flag=True, default=False)
+@click.option("--poll", required=False, default="")
+@click.option("--output", "output_path", required=False, default="")
+@click.option("--traces-dir", required=False, default="")
+@click.option("--output-txt", required=False, default="")
+@click.option("--output-json", required=False, default="")
 def eval_command(
     app_id: str | None,
-
-
-    seeds: str,
-    split: str,
-    model: str | None,
-    env_file: Sequence[str],
+    model: str,
+    config_path: str,
     trace_db: str,
-    metadata:
-    metadata_sql: str | None,
-) -> None:
-    try:
-        return _eval_command_impl(
-            app_id=app_id,
-            config=config,
-            task_app_url=task_app_url,
-            seeds=seeds,
-            split=split,
-            model=model,
-            env_file=env_file,
-            trace_db=trace_db,
-            metadata=metadata,
-            metadata_sql=metadata_sql,
-        )
-    except EvalCliError as exc:
-        raise click.ClickException(format_eval_error(exc)) from exc
-
-
-def _eval_command_impl(
-    app_id: str | None,
-    config: str | None,
-    task_app_url: str | None,
+    metadata: tuple[str, ...],
     seeds: str,
-
-
-    env_file:
-
-
-
+    url: str,
+    backend: str,
+    env_file: str,
+    ops: str,
+    return_trace: bool,
+    concurrency: str,
+    seed_set: str,
+    wait: bool,
+    poll: str,
+    output_path: str,
+    traces_dir: str,
+    output_txt: str,
+    output_json: str,
 ) -> None:
-    """
-
-
-
-
-
+    """Execute evaluation rollouts against a task app.
+
+    This is the main CLI entry point for the `synth-ai eval` command.
+
+    **Execution Modes:**
+    - **Direct Mode**: If `--backend` is not provided, calls task app directly
+    - **Backend Mode**: If `--backend` is provided, creates eval job on backend
+
+    **Arguments:**
+        app_id: Task app identifier (optional, can be in config)
+        model: Model name to override config (optional)
+        config_path: Path to TOML config file (optional)
+        url: Task app URL (required if not in config)
+        backend: Backend URL for trace capture (optional)
+        seeds: Comma-separated seed list (e.g., "0,1,2,3")
+        concurrency: Number of parallel rollouts (default: 1)
+        return_trace: Whether to include traces in response
+        traces_dir: Directory to save trace files
+        output_txt: Path to write text report
+        output_json: Path to write JSON report
+
+    **Example:**
+    ```bash
+    python -m synth_ai.cli eval \
+        --config banking77_eval.toml \
+        --url http://localhost:8103 \
+        --backend http://localhost:8000 \
+        --seeds 0,1,2,3,4 \
+        --concurrency 5 \
+        --output-json results.json \
+        --traces-dir traces/
+    ```
+
+    **See Also:**
+    - `synth_ai.cli.commands.eval.runner.run_eval()`: Execution logic
+    - `synth_ai.cli.commands.eval.config.resolve_eval_config()`: Config resolution
     """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    else:
-        auto_configs = discover_eval_config_paths()
-        if auto_configs:
-            config_path = auto_configs[0]
-            click.echo(f"Using eval config: {config_path}")
-
-    if config_path:
-        if _toml is None:
-            raise TomlUnavailableError()
-        if not config_path.exists():
-            raise EvalConfigNotFoundError(str(config_path))
-        try:
-            data = config_path.read_bytes()
-            parsed = _toml.loads(data.decode("utf-8"))
-            if isinstance(parsed, dict):
-                section = parsed.get("eval")
-                if section is None:
-                    cfg = dict(parsed)
-                elif isinstance(section, dict):
-                    cfg = dict(section)
-                else:
-                    raise MissingEvalTableError()
-        except Exception as exc:
-            raise EvalConfigParseError(path=str(config_path), detail=str(exc)) from exc
-
-    if cfg:
-        try:
-            normalized_cfg = validate_eval_options(cfg)
-            normalized_cfg_dict = dict(normalized_cfg)
-            eval_cfg = EvalConfig.from_dict(normalized_cfg_dict)
-            cfg = normalized_cfg_dict
-            click.echo(f"✓ Config validated: {len(eval_cfg.seeds)} seeds, model={eval_cfg.model}")
-        except (ValueError, TypeError) as validation_error:
-            raise InvalidEvalConfigError(detail=str(validation_error)) from validation_error
-    else:
-        cfg = {}
-
-    # CLI args override config
-    if eval_cfg:
-        app_id = app_id or eval_cfg.app_id
-    else:
-        app_id = app_id or (cfg.get("app_id") if isinstance(cfg.get("app_id"), str) else None)  # type: ignore
-
-    metadata_filters: dict[str, str] = {}
-    if eval_cfg:
-        metadata_filters.update(eval_cfg.metadata)
-    else:
-        cfg_metadata = cfg.get("metadata")
-        if isinstance(cfg_metadata, dict):
-            for key, value in cfg_metadata.items():
-                metadata_filters[str(key)] = str(value)
-        elif isinstance(cfg_metadata, list):
-            for item in cfg_metadata:
-                if isinstance(item, str) and "=" in item:
-                    key, value = item.split("=", 1)
-                    metadata_filters[key.strip()] = value.strip()
-
-    for item in metadata or ():
-        if "=" not in item:
-            raise MetadataFilterFormatError(entry=item)
-        key, value = item.split("=", 1)
-        key = key.strip()
-        value = value.strip()
-        if not key or not value:
-            raise MetadataFilterFormatError(entry=item)
-        metadata_filters[key] = value
-
-    metadata_sql_query: str | None = None
-    if eval_cfg and eval_cfg.metadata_sql:
-        metadata_sql_query = eval_cfg.metadata_sql
-    else:
-        cfg_metadata_sql = cfg.get("metadata_sql")
-        if isinstance(cfg_metadata_sql, dict):
-            metadata_sql_query = cfg_metadata_sql.get("query") or cfg_metadata_sql.get("sql")
-        elif isinstance(cfg_metadata_sql, str):
-            metadata_sql_query = cfg_metadata_sql
-
-    if metadata_sql:
-        metadata_sql_query = metadata_sql
-    if metadata_sql_query is not None:
-        metadata_sql_query = str(metadata_sql_query)
-
-    trace_db_url: str | None = None
-    trace_db = (trace_db or "").strip()
-    if trace_db and trace_db.lower() not in {"none", "off", "disable"}:
-        if "://" in trace_db:
-            trace_db_url = trace_db
-        else:
-            trace_path = Path(trace_db).expanduser()
-            trace_path.parent.mkdir(parents=True, exist_ok=True)
-            trace_db_url = f"sqlite+aiosqlite:///{trace_path}"
-    trace_tracer: SessionTracer | None = None
-    if trace_db_url and session_tracer_cls is not None:
-        trace_tracer = cast(SessionTracer, session_tracer_cls(db_url=trace_db_url, auto_save=True))
-
-    # Determine selection params (CLI takes precedence; TOML only fills unset model/seeds/env)
-    if cfg.get("model") and not model:
-        model = str(cfg["model"])  # type: ignore[index]
-    if cfg.get("seeds") and seeds == "0,1,2,3,4":
-        val = cfg["seeds"]
-        if isinstance(val, list):
-            with contextlib.suppress(Exception):
-                seeds = ",".join(str(int(x)) for x in val)
-        elif isinstance(val, str):
-            seeds = val
-        elif isinstance(val, int):
-            seeds = str(val)
-    if cfg.get("env_file") and not env_file:
-        ef = cfg["env_file"]
-        if isinstance(ef, str):
-            env_file = (ef,)  # type: ignore[assignment]
-        elif isinstance(ef, list):
-            env_file = tuple(str(x) for x in ef)  # type: ignore[assignment]
-
-    choice_for_env: AppChoice | None = None
-    entry: TaskAppEntryType | None = None
-    if task_app_url is None:
-        choice_for_env = select_app_choice(app_id, purpose="eval")
-        entry = choice_for_env.ensure_entry()
-
-    env_paths: list[Path] = []
-    if entry is not None:
-        original_env_path = choice_for_env.path if choice_for_env is not None else None
-        env_paths = determine_env_files(entry, env_file, original_path=original_env_path)
-    else:
-        if not env_file:
-            raise click.ClickException("--env-file is required when using --url")
-        for candidate in env_file:
-            p = Path(candidate).expanduser()
-            if not p.exists():
-                raise click.ClickException(f"Env file not found: {p}")
-            env_paths.append(p)
-
-    click.echo("Using env file(s): " + ", ".join(str(p) for p in env_paths))
-    load_env_files_into_process([str(Path(p)) for p in env_paths])
-
-    if task_app_url is None:
-        config = entry.config_factory()  # type: ignore[union-attr]
-        # Help the type checker; runtime check also enforced in server.run_task_app
-        if not isinstance(config, task_app_config_type):
-            raise click.ClickException(
-                "Invalid task app: config_factory did not return TaskAppConfig"
-            )
-        app = create_task_app(config)
-
-    # Determine supported models
-    inference_meta: dict[str, Any] = {}
-    supported: list[str] = []
-    seen_models: set[str] = set()
-
-    def _add_supported_model(candidate: Any) -> None:
-        if not candidate:
-            return
-        text = str(candidate).strip()
-        if not text or text in seen_models:
-            return
-        supported.append(text)
-        seen_models.add(text)
-
-    if task_app_url is None:
-        try:
-            if hasattr(config, "base_task_info") and config.base_task_info:
-                inf_obj = getattr(config.base_task_info, "inference", None)
-                if inf_obj is not None:
-                    if hasattr(inf_obj, "model_dump"):
-                        inference_meta = dict(inf_obj.model_dump(exclude_none=True))  # type: ignore[attr-defined]
-                    elif isinstance(inf_obj, dict):
-                        inference_meta = dict(inf_obj)
-        except Exception:
-            inference_meta = {}
-    else:
-        try:
-            import httpx as _hx
-
-            headers = {}
-            api_key = (os.environ.get("ENVIRONMENT_API_KEY") or "").strip()
-            if api_key:
-                headers["X-API-Key"] = api_key
-            with _hx.Client(base_url=task_app_url, headers=headers, timeout=15.0) as c:
-                info = c.get("/info").json()
-                inf = info.get("inference") if isinstance(info, dict) else None
-                if isinstance(inf, dict):
-                    inference_meta = dict(inf)
-        except Exception:
-            inference_meta = {}
-
-    default_model = inference_meta.get("model")
-    if isinstance(default_model, str):
-        _add_supported_model(default_model)
-
-    models_field = inference_meta.get("models")
-    if isinstance(models_field, list):
-        for candidate in models_field:
-            _add_supported_model(candidate)
-
-    supported_models = inference_meta.get("supported_models")
-    if isinstance(supported_models, list):
-        for candidate in supported_models:
-            _add_supported_model(candidate)
-
-    providers = inference_meta.get("providers")
-    if isinstance(providers, list):
-        if "openai" in providers:
-            _add_supported_model("gpt-5")
-        if "groq" in providers:
-            _add_supported_model("groq:llama-3.1-70b-versatile")
-
-    _add_supported_model("synth:qwen-0.6b")
-
-    selected_model = model
-    if not selected_model:
-        if not supported:
-            raise click.ClickException(
-                "No supported models; supply --model or add base_task_info.inference.model"
-            )
-        click.echo("Select model to evaluate:")
-        for idx, m in enumerate(supported, start=1):
-            click.echo(f" {idx}) {m}")
-        choice_idx = click.prompt("Enter choice", type=click.IntRange(1, len(supported)))
-        selected_model = supported[choice_idx - 1]
-
-    try:
-        seed_values = [int(s.strip()) for s in seeds.split(",") if s.strip()]
-    except Exception as exc:
-        raise SeedParseError(value=seeds) from exc
-
-    import httpx
-
-    headers = {}
-    api_key = (os.environ.get("ENVIRONMENT_API_KEY") or "").strip()
-    if api_key:
-        headers["X-API-Key"] = api_key
-
-    # Precompute optional policy overrides from TOML
-    policy_overrides: dict[str, Any] = {}
-    try:
-        # Accept [eval.policy] table or top-level keys for convenience
-        if isinstance(cfg.get("policy"), dict):
-            policy_overrides.update(dict(cfg["policy"]))
-        # Back-compat: allow temperature/max_tokens at top level
-        for k in (
-            "temperature",
-            "max_tokens",
-            "reasoning_effort",
-            "system_hint",
-            "tool_choice",
-            "inference_url",
-        ):
-            if k in cfg and k not in policy_overrides:
-                policy_overrides[k] = cfg.get(k)
-    except Exception:
-        policy_overrides = {}
-
-    raw_concurrency = cfg.get("concurrency")
+    config_file = Path(config_path) if config_path else None
+    if config_file and not config_file.exists():
+        raise click.ClickException("Eval config not found")
+
+    options = {
+        "app_id": app_id or "",
+        "model": model,
+        "config": config_path,
+        "trace_db": trace_db,
+        "metadata": list(metadata),
+        "seeds": seeds,
+        "url": url,
+        "backend": backend,
+        "env_file": env_file,
+        "ops": ops,
+        "return_trace": return_trace,
+        "concurrency": concurrency,
+        "poll": poll,
+    }
     try:
-
-    except
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            spec.loader.exec_module(module)
-        except click.ClickException:
-            raise
-        except Exception as exc:
-            raise click.ClickException(f"Unable to load judge module: {exc}") from exc
-
-        if judge_callable_name:
-            try:
-                judge_fn = getattr(module, str(judge_callable_name))
-            except AttributeError as exc:
-                raise click.ClickException(
-                    f"Judge callable '{judge_callable_name}' not found in module"
-                ) from exc
-        else:
-            if hasattr(module, "judge"):
-                judge_fn = module.judge
-            else:
-                raise click.ClickException("Judge module must expose 'judge' callable")
-
-        if not callable(judge_fn):
-            raise click.ClickException("Judge callable is not callable")
-
-        judge_kwargs = {
-            k: v
-            for k, v in judge_cfg.items()
-            if k not in {"module", "path", "callable", "function", "name"}
-        }
-        display_name = str(
-            judge_cfg.get("name")
-            or name_hint
-            or f"judge{len(judge_specs) + 1}"
-        )
-        judge_specs.append(judge_spec_cls(display_name, judge_fn, judge_kwargs))
-
-    raw_judge_cfg = cfg.get("judge")
-    if isinstance(raw_judge_cfg, dict) and raw_judge_cfg:
-        direct_keys = {"module", "path", "callable", "function", "name"}
-        has_direct_keys = any(key in raw_judge_cfg for key in direct_keys)
-        nested_candidates = [
-            (key, value)
-            for key, value in raw_judge_cfg.items()
-            if isinstance(value, dict)
-        ]
-        if has_direct_keys and not nested_candidates:
-            _register_judge(None, raw_judge_cfg)
-        else:
-            for sub_name, sub_cfg in nested_candidates:
-                _register_judge(sub_name, sub_cfg)
-
-    raw_judges_list = cfg.get("judges")
-    if isinstance(raw_judges_list, list):
-        for _index, entry in enumerate(raw_judges_list, start=1):
-            if isinstance(entry, dict):
-                _register_judge(entry.get("name") or f"judge{len(judge_specs) + 1}", entry)
-
-    records: list[dict[str, Any]] = []
-
-    successes = 0
-    failures = 0
-    # Aggregate outcome stats across successful seeds
-    outcome_sum: float = 0.0
-    outcome_count: int = 0
-    outcome_correct: int = 0
-
-    def _build_task_rows(taskset: Any) -> dict[int, dict[str, Any]]:
-        rows: dict[int, dict[str, Any]] = {}
-        if not isinstance(taskset, dict):
-            return rows
-
-        scenario_ids = taskset.get("scenario_ids") or []
-        loop_ids = taskset.get("loop_ids") or []
-        thread_ids = taskset.get("thread_ids") or []
-        difficulty_map = taskset.get("difficulty_map") or {}
-
-        max_len = max(len(scenario_ids), len(loop_ids), len(thread_ids))
-        for seed in range(max_len):
-            scenario_id = scenario_ids[seed] if seed < len(scenario_ids) else None
-            loop_id = loop_ids[seed] if seed < len(loop_ids) else None
-            thread_id = thread_ids[seed] if seed < len(thread_ids) else None
-            difficulty = None
-            if isinstance(difficulty_map, dict):
-                if scenario_id and scenario_id in difficulty_map:
-                    difficulty = difficulty_map.get(scenario_id)
-                elif str(seed) in difficulty_map:
-                    difficulty = difficulty_map.get(str(seed))
-
-            rows[seed] = {
-                "seed": seed,
-                "scenario_id": scenario_id,
-                "loop_id": loop_id,
-                "thread_id": thread_id,
-                "difficulty": difficulty,
-            }
-        return rows
-
-    def _apply_metadata_filters(
-        rows: dict[int, dict[str, Any]], seeds_list: list[int], filters: dict[str, str]
-    ) -> list[int]:
-        if not filters:
-            return seeds_list
-        filtered: list[int] = []
-        for seed in seeds_list:
-            row = rows.get(seed)
-            if not row:
-                continue
-            include = True
-            for key, expected in filters.items():
-                actual = row.get(key)
-                if actual is None:
-                    include = False
-                    break
-                if str(actual).lower() != expected.lower():
-                    include = False
-                    break
-            if include:
-                filtered.append(seed)
-        return filtered
-
-    def _apply_metadata_sql(
-        rows: dict[int, dict[str, Any]], seeds_list: list[int], query: str
-    ) -> list[int]:
-        """Return seeds that satisfy an arbitrary SQL query.
-
-        The query is executed against an in-memory SQLite table named `tasks`
-        with columns (seed INTEGER, scenario_id TEXT, loop_id TEXT, thread_id TEXT, difficulty TEXT).
-        Any rows whose `seed` value (or first column if `seed` is absent) appear in the result set are retained.
-        """
-        if not query:
-            return seeds_list
-        conn = sqlite3.connect(":memory:")
-        try:
-            cur = conn.cursor()
-            cur.execute(
-                "CREATE TABLE tasks (seed INTEGER, scenario_id TEXT, loop_id TEXT, thread_id TEXT, difficulty TEXT)"
-            )
-            insert_stmt = (
-                "INSERT INTO tasks (seed, scenario_id, loop_id, thread_id, difficulty) VALUES (?,?,?,?,?)"
-            )
-            for seed in seeds_list:
-                row = rows.get(seed, {})
-                cur.execute(
-                    insert_stmt,
-                    [
-                        seed,
-                        row.get("scenario_id"),
-                        row.get("loop_id"),
-                        row.get("thread_id"),
-                        row.get("difficulty"),
-                    ],
-                )
-
-            result = cur.execute(query)
-            fetched = result.fetchall()
-            if not fetched:
-                return []
-            description = result.description or []
-            col_names = [col[0] for col in description]
-            seeds_out: list[int] = []
-            for entry in fetched:
-                value = entry[col_names.index("seed")] if "seed" in col_names else entry[0]
-                try:
-                    seeds_out.append(int(value))
-                except Exception as exc:
-                    raise MetadataSQLResultError(
-                        query=query,
-                        detail="non-integer value returned",
-                    ) from exc
-            seeds_set = set(seeds_out)
-            return [seed for seed in seeds_list if seed in seeds_set]
-        except sqlite3.Error as exc:
-            raise MetadataSQLExecutionError(query=query, detail=str(exc)) from exc
-        finally:
-            conn.close()
-
-    async def _run_eval() -> None:
-        nonlocal successes, failures, outcome_sum, outcome_count, outcome_correct, records, seed_values
-
-        if trace_tracer is not None and trace_tracer.db is None:
-            await trace_tracer.initialize()
-
-        if task_app_url is None:
-            transport = httpx.ASGITransport(app=app)  # type: ignore[name-defined]
-            async_client = httpx.AsyncClient(
-                transport=cast(Any, transport),
-                base_url="http://eval.local",
-                timeout=300.0,
-                follow_redirects=True,
-                headers=headers,
-            )
-        else:
-            async_client = httpx.AsyncClient(
-                base_url=task_app_url,
-                timeout=300.0,
-                follow_redirects=True,
-                headers=headers,
-            )
-
-        try:
-            taskset_payload: dict[str, Any] | None = None
-            try:
-                task_info_response = await async_client.get("/task_info")
-            except Exception:
-                task_info_response = None
-            if task_info_response is not None and task_info_response.status_code == 200:
-                with contextlib.suppress(Exception):
-                    payload_json = task_info_response.json()
-                    if isinstance(payload_json, dict) and "taskset" in payload_json:
-                        taskset_payload = payload_json.get("taskset")
-                        if not isinstance(taskset_payload, dict):
-                            taskset_payload = None
-                    elif isinstance(payload_json, dict):
-                        taskset_payload = payload_json
-
-            available_seeds = list(seed_values)
-            if metadata_sql_query or metadata_filters:
-                if not taskset_payload:
-                    raise TaskInfoUnavailableError()
-                rows = _build_task_rows(taskset_payload)
-                if metadata_sql_query:
-                    available_seeds = _apply_metadata_sql(rows, available_seeds, metadata_sql_query)
-                if metadata_filters:
-                    available_seeds = _apply_metadata_filters(rows, available_seeds, metadata_filters)
-                if not available_seeds:
-                    raise NoSeedsMatchedError()
-                seed_values = available_seeds
-
-            semaphore = asyncio.Semaphore(concurrency_limit)
-
-            async def _run_seed(seed_val: int) -> None:
-                nonlocal successes, failures, outcome_sum, outcome_count, outcome_correct, records
-                # Read env_name and policy_name from config if available
-                env_name = cfg.get("env_name") or (cfg.get("env", {}).get("env_name") if isinstance(cfg.get("env"), dict) else None)
-                policy_name = cfg.get("policy_name") or (cfg.get("policy", {}).get("policy_name") if isinstance(cfg.get("policy"), dict) else None)
-                env_config_overrides = cfg.get("env_config", {}) if isinstance(cfg.get("env_config"), dict) else {}
-                policy_config_overrides = cfg.get("policy_config", {}) if isinstance(cfg.get("policy_config"), dict) else {}
-
-                # Debug: print config parsing
-                if seed_val == 0:
-                    click.echo(f"[DEBUG] env_name from config: {env_name}")
-                    click.echo(f"[DEBUG] policy_name from config: {policy_name}")
-
-                # Generate default ops sequence if not provided
-                max_llm_calls = policy_config_overrides.get("max_llm_calls", 10)
-                ops_list = cfg.get("ops", [])
-                if not ops_list:
-                    # Generate default "agent, env" pairs for max_llm_calls
-                    ops_list = ["agent", "env"] * int(max_llm_calls)
-
-                body = {
-                    "run_id": str(uuid.uuid4()),
-                    "env": {"config": {"split": split, "index": seed_val, **env_config_overrides}, "seed": seed_val},
-                    "policy": {
-                        "policy_name": policy_name or selected_model,
-                        "config": {"model": selected_model, **policy_overrides, **policy_config_overrides},
-                    },
-                    "ops": ops_list,
-                    "record": {
-                        "return_trace": cfg.get("return_trace", True),
-                        "trace_format": cfg.get("trace_format", "structured"),
-                    },
-                    "mode": "eval",  # RolloutMode.EVAL: use inference URLs as-is, no transformations
-                }
-                if env_name:
-                    env_section = body.get("env")
-                    if isinstance(env_section, dict):
-                        env_section["env_name"] = env_name
-                    else:
-                        body["env"] = {"env_name": env_name}
-
-                # Debug: print the body being sent
-                if seed_val == 0:
-                    click.echo(f"[DEBUG] rollout body env: {body['env']}")
-                    click.echo(f"[DEBUG] rollout body policy: {body['policy']}")
-                    click.echo(f"[DEBUG] rollout body mode: {body.get('mode', 'NOT SET')}")
-                rollout_elapsed: float | None = None
-                rollout_start = time.perf_counter()
-                try:
-                    import logging
-                    _log = logging.getLogger(__name__)
-                    _log.info(f"[EVAL_BODY_DEBUG] Sending body with mode={body.get('mode')}")
-                    async with semaphore:
-                        response = await async_client.post("/rollout", json=body)
-                    rollout_elapsed = time.perf_counter() - rollout_start
-                except Exception as exc:
-                    failures += 1
-                    click.echo(f"seed={seed_val} error={exc}")
-                    return
-
-                ok = 200 <= response.status_code < 300
-                if ok:
-                    successes += 1
-                else:
-                    failures += 1
-
-                summary = [f"seed={seed_val}", f"status={response.status_code}"]
-                data: Any
-                try:
-                    data = response.json()
-                except Exception:
-                    data = None
-
-                # Debug: print validation errors
-                if response.status_code == 422 and data:
-                    click.echo(f"[DEBUG] 422 Validation Error: {data}")
-
-                metrics: dict[str, Any] | None = None
-                completion: str | None = None
-                prompt_index: int | None = None
-                prompt_text: str | None = None
-                task_id: str | None = None
-                task_split: str | None = None
-                task_rubric_id: str | None = None
-
-                trace_namespace: dict[str, Any] | None = None
-                session_trace_dict: dict[str, Any] | None = None
-
-                if isinstance(data, dict):
-                    import logging
-                    _logger = logging.getLogger(__name__)
-                    _logger.info(f"[EVAL_DEBUG] Response data keys: {list(data.keys())}")
-                    if "detail" in data:
-                        _logger.error(f"[EVAL_DEBUG] Task app returned error: {data['detail']}")
-                    trace_namespace = data.get("trace")
-                    _logger.info(f"[EVAL_DEBUG] trace_namespace type: {type(trace_namespace)}, value: {trace_namespace if not isinstance(trace_namespace, dict) else 'dict with keys: ' + str(list(trace_namespace.keys()) if trace_namespace else 'None')}")
-                    if not isinstance(trace_namespace, dict):
-                        raise RuntimeError(
-                            "The 'synth-ai eval' command requires trace payloads in rollout responses. "
-                            "Ensure the rollout request includes 'trace_format': 'structured' and 'return_trace': true, "
-                            "and that task app tracing is enabled (TASKAPP_TRACING_ENABLED=1). "
-                            "Note: This is specific to the eval command - general rollout endpoints don't require traces."
-                        )
-                    # Handle both "compact" and "full" trace formats:
-                    # - compact: trace_namespace contains {session_id, metadata, ...}
-                    # - full: trace_namespace IS the full session_trace dict
-                    session_trace_dict = trace_namespace.get("session_trace")
-                    if not isinstance(session_trace_dict, dict):
-                        # If no session_trace key, assume "full" format where trace itself is the session_trace
-                        if "session_id" in trace_namespace:
-                            session_trace_dict = trace_namespace
-                        else:
-                            raise RuntimeError(
-                                "The 'synth-ai eval' command requires 'session_trace' in the trace payload or a valid full trace format. "
-                                "Ensure the task app is using tracing_v3 and returning structured trace data."
-                            )
-                    metrics = data.get("metrics") if isinstance(data.get("metrics"), dict) else None
-                    if metrics:
-                        mean_return = metrics.get("mean_return") or metrics.get("total_reward")
-                        outcome = metrics.get("outcome_score")
-                        if mean_return is not None:
-                            summary.append(f"mean_return={mean_return}")
-                        if outcome is not None:
-                            summary.append(f"outcome={outcome}")
-                            try:
-                                val = float(outcome)
-                                outcome_sum += val
-                                outcome_count += 1
-                                if val >= 0.5:
-                                    outcome_correct += 1
-                            except Exception:
-                                pass
-                    trajs = (
-                        data.get("trajectories")
-                        if isinstance(data.get("trajectories"), list)
-                        else None
-                    )
-                    if trajs:
-                        first = trajs[0] if trajs else None
-                        steps = first.get("steps") if isinstance(first, dict) else None
-                        if isinstance(steps, list) and steps:
-                            step0 = steps[0]
-                            tool_calls = step0.get("tool_calls") or step0.get("tools") or []
-                            if isinstance(tool_calls, list):
-                                summary.append(f"tool_calls={len(tool_calls)}")
-                            obs = step0.get("obs") if isinstance(step0, dict) else None
-                            if isinstance(obs, dict):
-                                idx_val = obs.get("prompt_index")
-                                if isinstance(idx_val, int):
-                                    prompt_index = idx_val
-                                prompt_raw = obs.get("prompt")
-                                if isinstance(prompt_raw, str):
-                                    prompt_text = prompt_raw
-                                if task_id is None:
-                                    candidate_id = obs.get("task_id")
-                                    if isinstance(candidate_id, str) and candidate_id:
-                                        task_id = candidate_id
-                                if task_split is None:
-                                    candidate_split = obs.get("task_split")
-                                    if isinstance(candidate_split, str) and candidate_split:
-                                        task_split = candidate_split
-                                if task_rubric_id is None:
-                                    candidate_rid = obs.get("task_rubric_id")
-                                    if isinstance(candidate_rid, str) and candidate_rid:
-                                        task_rubric_id = candidate_rid
-                        final = first.get("final") if isinstance(first, dict) else None
-                        if isinstance(final, dict):
-                            final_obs = final.get("observation")
-                            if isinstance(final_obs, dict):
-                                comp_val = final_obs.get("completion")
-                                if isinstance(comp_val, str):
-                                    completion = comp_val
-                                if task_id is None:
-                                    candidate_id = final_obs.get("task_id")
-                                    if isinstance(candidate_id, str) and candidate_id:
-                                        task_id = candidate_id
-                                if task_split is None:
-                                    candidate_split = final_obs.get("task_split")
-                                    if isinstance(candidate_split, str) and candidate_split:
-                                        task_split = candidate_split
-                                if task_rubric_id is None:
-                                    candidate_rid = final_obs.get("task_rubric_id")
-                                    if isinstance(candidate_rid, str) and candidate_rid:
-                                        task_rubric_id = candidate_rid
-                            final_info = final.get("info")
-                            if isinstance(final_info, dict):
-                                if task_id is None:
-                                    candidate_id = final_info.get("task_id")
-                                    if isinstance(candidate_id, str) and candidate_id:
-                                        task_id = candidate_id
-                                if task_split is None:
-                                    candidate_split = final_info.get("task_split")
-                                    if isinstance(candidate_split, str) and candidate_split:
-                                        task_split = candidate_split
-                                if task_rubric_id is None:
-                                    candidate_rid = final_info.get("task_rubric_id")
-                                    if isinstance(candidate_rid, str) and candidate_rid:
-                                        task_rubric_id = candidate_rid
-                    if task_id:
-                        summary.append(f"task_id={task_id}")
-                    click.echo(" ".join(summary))
-                    with contextlib.suppress(Exception):
-                        click.echo(json.dumps(data, indent=2))
-                else:
-                    click.echo(" ".join(summary))
-
-                official_score = None
-                if isinstance(metrics, dict):
-                    for key in ("mean_return", "total_reward", "outcome_score"):
-                        val = metrics.get(key)
-                        if isinstance(val, int | float):
-                            official_score = float(val)
-                            break
-                if official_score is None and isinstance(data, dict):
-                    try:
-                        reward_val = data["trajectories"][0]["steps"][0].get("reward")
-                        if isinstance(reward_val, int | float):
-                            official_score = float(reward_val)
-                    except Exception:
-                        pass
-
-                if official_score is not None:
-                    if official_score < 0.0:
-                        official_score = 0.0
-                    elif official_score > 1.0:
-                        official_score = min(1.0, official_score)
-
-                judge_scores: dict[str, float | None] = {}
-                judges_timings: dict[str, float | None] = {}
-                timings: dict[str, Any] = {
-                    "rollout_s": rollout_elapsed,
-                    "judges": judges_timings,
-                }
-                if judge_specs:
-                    for spec in judge_specs:
-                        score_value: float | None = None
-                        judge_elapsed: float | None = None
-                        # Run judges for all tasks (text-based and trajectory-based)
-                        # Text-based tasks have completion, trajectory-based tasks use response
-                        judge_payload = {
-                            "seed": seed_val,
-                            "prompt_index": prompt_index,
-                            "prompt": prompt_text,
-                            "completion": completion,
-                            "metrics": metrics,
-                            "response": data,
-                            "trace": trace_namespace,
-                        }
-                        try:
-                            judge_start = time.perf_counter()
-                            result = spec.fn(judge_payload, **spec.kwargs)
-                            judge_elapsed = time.perf_counter() - judge_start
-                            if isinstance(result, int | float):
-                                score_value = float(result)
-                        except Exception as exc:
-                            if judge_elapsed is None:
-                                judge_elapsed = time.perf_counter() - judge_start
-                            click.echo(f"seed={seed_val} judge[{spec.name}]_error={exc}")
-                        judges_timings[spec.name] = judge_elapsed
-                        judge_scores[spec.name] = score_value
-
-                if trace_tracer is not None and trace_namespace:
-                    storage_metadata = {
-                        "eval_seed": seed_val,
-                        "prompt_index": prompt_index,
-                        "task_id": task_id,
-                        "task_split": task_split,
-                        "task_rubric_id": task_rubric_id,
-                        "official_score": official_score,
-                        "judge_scores": judge_scores,
-                        "model": selected_model,
-                        "prompt": prompt_text,
-                        "completion": completion,
-                    }
-                    if store_trace is not None:
-                        await store_trace(trace_tracer, trace_namespace, storage_metadata)
-
-                records.append(
-                    {
-                        "seed": seed_val,
-                        "prompt_index": prompt_index,
-                        "task_id": task_id,
-                        "task_split": task_split,
-                        "task_rubric_id": task_rubric_id,
-                        "official_score": official_score,
-                        "judge_scores": judge_scores,
-                        "timings": timings,
-                    }
-                )
-
-            await asyncio.gather(*[_run_seed(seed_val) for seed_val in seed_values])
-        finally:
-            await async_client.aclose()
-
-    try:
-        asyncio.run(_run_eval())
-    finally:
-        if trace_tracer is not None and trace_tracer.db is not None:
-            asyncio.run(trace_tracer.db.close())
-
-    click.echo(
-        f"Eval complete: {successes} ok, {failures} failed; model={selected_model}, split={split}"
+        normalized = validate_eval_options(options)
+    except ValueError as exc:
+        raise click.ClickException(str(exc)) from exc
+
+    if env_file:
+        load_dotenv(env_file, override=False)
+
+    from .config import resolve_eval_config
+    from .runner import format_eval_report, format_eval_table, run_eval, save_traces
+
+    output_json_path = output_path or output_json
+
+    # Auto-enable return_trace if traces-dir is provided
+    effective_return_trace = return_trace or bool(traces_dir)
+
+    resolved = resolve_eval_config(
+        config_path=config_file,
+        cli_app_id=str(normalized.get("app_id") or "") or None,
+        cli_model=str(normalized.get("model") or "") or None,
+        cli_seeds=normalized.get("seeds") or None,
+        cli_url=str(normalized.get("url") or "") or None,
+        cli_env_file=str(normalized.get("env_file") or "") or None,
+        cli_ops=normalized.get("ops") or None,
+        cli_return_trace=effective_return_trace,
+        cli_concurrency=normalized.get("concurrency") or None,
+        cli_output_txt=Path(output_txt) if output_txt else None,
+        cli_output_json=Path(output_json_path) if output_json_path else None,
+        cli_backend_url=str(normalized.get("backend") or "") or None,
+        cli_wait=bool(wait),
+        cli_poll_interval=float(normalized.get("poll") or 0) if normalized.get("poll") else None,
+        cli_traces_dir=Path(traces_dir) if traces_dir else None,
+        seed_set=seed_set,
+        metadata=normalized.get("metadata") or {},
     )
 
-    if
-
-
-    click.
-
-    )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    else:
-        click.echo(" Pearson r: undefined (zero variance)")
-    else:
-        click.echo(" Pearson r: n/a (need ≥2 paired scores)")
-
-    header = ["Seed", "Prompt", "Official"]
-    header.extend(spec.name for spec in judge_specs)
-    rows: list[list[str]] = []
-    for record in sorted(records, key=lambda r: (r["seed"], r.get("prompt_index") or -1)):
-        seed_val = str(record["seed"])
-        prompt_idx = (
-            str(record["prompt_index"])
-            if record["prompt_index"] is not None
-            else "-"
-        )
-        official_val = (
-            f"{record['official_score']:.3f}"
-            if record["official_score"] is not None
-            else "-"
-        )
-        row = [seed_val, prompt_idx, official_val]
-        for spec in judge_specs:
-            score_val = record["judge_scores"].get(spec.name)
-            row.append(f"{score_val:.3f}" if isinstance(score_val, int | float) else "-")
-        rows.append(row)
-
-    widths = [len(col) for col in header]
-    for row in rows:
-        for idx, cell in enumerate(row):
-            widths[idx] = max(widths[idx], len(cell))
-
-    click.echo("")
-    click.echo(" ".join(h.ljust(widths[idx]) for idx, h in enumerate(header)))
-    click.echo(" ".join("-" * widths[idx] for idx in range(len(header))))
-    for row in rows:
-        click.echo(" ".join(cell.ljust(widths[idx]) for idx, cell in enumerate(row)))
-
-
-
-command = eval_command
-
-
-def get_command() -> click.Command:
-    """Return the Click command implementing task-app evaluation."""
-    return command
+    if not resolved.task_app_url:
+        raise click.ClickException("task_app_url is required (provide via TOML or --url)")
+    if not resolved.env_name:
+        raise click.ClickException("env_name is required (provide via TOML)")
+    if not resolved.seeds:
+        raise click.ClickException("No seeds found (provide via TOML or --seeds)")
+    if not resolved.policy_config.get("model") and not model:
+        raise click.ClickException("policy model is required (set in TOML or --model)")
+
+    results = asyncio.run(run_eval(resolved))
+    if results:
+        table = format_eval_table(results)
+        click.echo(table)
+
+        report = format_eval_report(resolved, results)
+        if resolved.output_txt:
+            resolved.output_txt.write_text(report, encoding="utf-8")
+            click.echo(f"\nWrote report: {resolved.output_txt}")
+        if resolved.output_json:
+            # Exclude trace from JSON output (too large), but include token counts
+            results_data = []
+            for result in results:
+                result_dict = asdict(result)
+                result_dict.pop("trace", None)  # Remove trace from JSON output
+                results_data.append(result_dict)
+            payload = {
+                "config": {
+                    "app_id": resolved.app_id,
+                    "task_app_url": resolved.task_app_url,
+                    "env_name": resolved.env_name,
+                    "policy_name": resolved.policy_name,
+                    "policy_config": resolved.policy_config,
+                    "seeds": resolved.seeds,
+                    "ops": resolved.ops,
+                    "concurrency": resolved.concurrency,
+                },
+                "results": results_data,
+            }
+            resolved.output_json.write_text(json.dumps(payload, indent=2), encoding="utf-8")
+            click.echo(f"Wrote JSON report: {resolved.output_json}")
+
+    # Save traces if traces-dir is provided
+    if traces_dir:
+        saved_count = save_traces(results, traces_dir)
+        click.echo(f"Saved {saved_count} traces to {traces_dir}")
 
 
-
-    if isinstance(err, TomlUnavailableError):
-        hint = err.hint or "Install tomli or use Python 3.11+."
-        return f"TOML parser not available. {hint}"
-    if isinstance(err, EvalConfigNotFoundError):
-        return f"Eval config not found: {err.path}"
-    if isinstance(err, EvalConfigParseError):
-        return f"Failed to parse TOML '{err.path}': {err.detail}"
-    if isinstance(err, MissingEvalTableError):
-        return "Config must contain an [eval] table."
-    if isinstance(err, InvalidEvalConfigError):
-        return f"Invalid eval config: {err.detail}"
-    if isinstance(err, SeedParseError):
-        return f"Unable to parse seeds from '{err.value}'. Provide comma-separated integers."
-    if isinstance(err, MetadataFilterFormatError):
-        return f"Metadata filter '{err.entry}' must be key=value."
-    if isinstance(err, TaskInfoUnavailableError):
-        return "Task metadata filters require the task app to expose /task_info metadata."
-    if isinstance(err, NoSeedsMatchedError):
-        hint = err.hint or "Adjust the metadata filters or seed list."
-        return f"No seeds match the provided metadata filters. {hint}"
-    if isinstance(err, MetadataSQLExecutionError):
-        return f"Failed to execute metadata SQL query '{err.query}': {err.detail}"
-    if isinstance(err, MetadataSQLResultError):
-        return f"metadata SQL query '{err.query}' must return integer seed values ({err.detail})"
-    return str(err)
+__all__ = ["eval_command"]
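For quick reference, the usage documented in the rewritten `core.py` module docstring reduces to a TOML file plus one command. The sketch below is assembled from that docstring; the `banking77` app id, port `8103`, and `gpt-4` model are the docstring's illustrative values, not required names.

```toml
# banking77_eval.toml — minimal [eval] config, per the new docstring
[eval]
app_id = "banking77"
url = "http://localhost:8103"
env_name = "banking77"
seeds = [0, 1, 2, 3, 4]

[eval.policy_config]
model = "gpt-4"
provider = "openai"
```

```bash
# Direct mode (no backend); add --backend http://localhost:8000 for trace capture
python -m synth_ai.cli eval --config banking77_eval.toml --url http://localhost:8103
```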