synth-ai 0.2.16__py3-none-any.whl ā 0.2.19__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of synth-ai might be problematic. Click here for more details.
- examples/analyze_semantic_words.sh +2 -2
- examples/baseline/banking77_baseline.py +204 -0
- examples/baseline/crafter_baseline.py +407 -0
- examples/baseline/pokemon_red_baseline.py +326 -0
- examples/baseline/simple_baseline.py +56 -0
- examples/baseline/warming_up_to_rl_baseline.py +239 -0
- examples/blog_posts/gepa/README.md +355 -0
- examples/blog_posts/gepa/configs/banking77_gepa_local.toml +95 -0
- examples/blog_posts/gepa/configs/banking77_gepa_test.toml +82 -0
- examples/blog_posts/gepa/configs/banking77_mipro_local.toml +52 -0
- examples/blog_posts/gepa/configs/hotpotqa_gepa_local.toml +59 -0
- examples/blog_posts/gepa/configs/hotpotqa_gepa_qwen.toml +36 -0
- examples/blog_posts/gepa/configs/hotpotqa_mipro_local.toml +53 -0
- examples/blog_posts/gepa/configs/hover_gepa_local.toml +59 -0
- examples/blog_posts/gepa/configs/hover_gepa_qwen.toml +36 -0
- examples/blog_posts/gepa/configs/hover_mipro_local.toml +53 -0
- examples/blog_posts/gepa/configs/ifbench_gepa_local.toml +59 -0
- examples/blog_posts/gepa/configs/ifbench_gepa_qwen.toml +36 -0
- examples/blog_posts/gepa/configs/ifbench_mipro_local.toml +53 -0
- examples/blog_posts/gepa/configs/pupa_gepa_local.toml +60 -0
- examples/blog_posts/gepa/configs/pupa_mipro_local.toml +54 -0
- examples/blog_posts/gepa/deploy_banking77_task_app.sh +41 -0
- examples/blog_posts/gepa/gepa_baseline.py +204 -0
- examples/blog_posts/gepa/query_prompts_example.py +97 -0
- examples/blog_posts/gepa/run_gepa_banking77.sh +87 -0
- examples/blog_posts/gepa/task_apps.py +105 -0
- examples/blog_posts/gepa/test_gepa_local.sh +67 -0
- examples/blog_posts/gepa/verify_banking77_setup.sh +123 -0
- examples/blog_posts/pokemon_vl/README.md +98 -0
- examples/blog_posts/pokemon_vl/configs/eval_gpt5nano.toml +26 -0
- examples/blog_posts/pokemon_vl/configs/eval_qwen3_vl.toml +27 -0
- examples/blog_posts/pokemon_vl/configs/eval_rl_final.toml +24 -0
- examples/blog_posts/pokemon_vl/configs/filter_high_reward.toml +10 -0
- examples/blog_posts/pokemon_vl/configs/train_rl_from_sft.toml +43 -0
- examples/blog_posts/pokemon_vl/configs/train_sft_qwen4b_vl.toml +40 -0
- examples/blog_posts/pokemon_vl/extract_images.py +239 -0
- examples/blog_posts/pokemon_vl/pokemon_vl_baseline.py +326 -0
- examples/blog_posts/pokemon_vl/run_eval_extract_images.py +209 -0
- examples/blog_posts/pokemon_vl/run_qwen_eval_extract_images.py +212 -0
- examples/blog_posts/pokemon_vl/text_box_analysis.md +106 -0
- examples/blog_posts/warming_up_to_rl/ARCHITECTURE.md +195 -0
- examples/blog_posts/warming_up_to_rl/FINAL_TEST_RESULTS.md +127 -0
- examples/blog_posts/warming_up_to_rl/INFERENCE_SUCCESS.md +132 -0
- examples/blog_posts/warming_up_to_rl/README.md +158 -0
- examples/blog_posts/warming_up_to_rl/SMOKE_TESTING.md +164 -0
- examples/blog_posts/warming_up_to_rl/SMOKE_TEST_COMPLETE.md +253 -0
- examples/blog_posts/warming_up_to_rl/configs/eval_baseline_qwen32b_10x20.toml +25 -0
- examples/blog_posts/warming_up_to_rl/configs/eval_ft_qwen4b.toml +25 -0
- examples/blog_posts/warming_up_to_rl/configs/eval_ft_qwen4b_10x20.toml +26 -0
- examples/blog_posts/warming_up_to_rl/configs/eval_groq_qwen32b.toml +25 -0
- examples/blog_posts/warming_up_to_rl/configs/eval_openai_gpt_oss_120b.toml +29 -0
- examples/blog_posts/warming_up_to_rl/configs/filter_high_reward_dataset.toml +10 -0
- examples/blog_posts/warming_up_to_rl/configs/smoke_test.toml +75 -0
- examples/blog_posts/warming_up_to_rl/configs/train_rl_from_sft.toml +91 -0
- examples/blog_posts/warming_up_to_rl/configs/train_sft_qwen4b.toml +40 -0
- examples/blog_posts/warming_up_to_rl/warming_up_to_rl_baseline.py +187 -0
- examples/dev/qwen3_32b_qlora_4xh100.toml +5 -0
- examples/multi_step/configs/VERILOG_REWARDS.md +4 -0
- examples/multi_step/configs/VERILOG_RL_CHECKLIST.md +4 -0
- examples/multi_step/configs/crafter_rl_outcome.toml +2 -1
- examples/multi_step/configs/crafter_rl_stepwise_hosted_judge.toml +65 -107
- examples/multi_step/configs/crafter_rl_stepwise_shaped.toml +2 -1
- examples/multi_step/configs/crafter_rl_stepwise_simple.toml +2 -1
- examples/multi_step/configs/crafter_rl_stepwise_simple_NEW_FORMAT.toml +105 -0
- examples/multi_step/configs/verilog_rl_lora.toml +80 -123
- examples/qwen_coder/configs/coder_lora_30b.toml +1 -3
- examples/qwen_coder/configs/coder_lora_4b.toml +4 -1
- examples/qwen_coder/configs/coder_lora_small.toml +1 -3
- examples/qwen_vl/README.md +10 -12
- examples/qwen_vl/SETUP_COMPLETE.md +7 -8
- examples/qwen_vl/VISION_TESTS_COMPLETE.md +2 -3
- examples/qwen_vl/collect_data_via_cli.md +76 -84
- examples/qwen_vl/collect_vision_traces.py +4 -4
- examples/qwen_vl/configs/crafter_rl_vision_qwen3vl4b.toml +40 -57
- examples/qwen_vl/configs/crafter_vlm_sft_example.toml +1 -2
- examples/qwen_vl/configs/eval_gpt4o_mini_vision.toml +20 -37
- examples/qwen_vl/configs/eval_gpt5nano_vision.toml +21 -40
- examples/qwen_vl/configs/eval_qwen3vl_vision.toml +26 -0
- examples/qwen_vl/configs/{filter_qwen2vl_sft.toml ā filter_qwen3vl_sft.toml} +4 -5
- examples/qwen_vl/configs/filter_vision_sft.toml +2 -3
- examples/qwen_vl/crafter_qwen_vl_agent.py +5 -5
- examples/qwen_vl/run_vision_comparison.sh +6 -7
- examples/rl/README.md +5 -5
- examples/rl/configs/rl_from_base_qwen.toml +26 -1
- examples/rl/configs/rl_from_base_qwen17.toml +6 -2
- examples/rl/task_app/README.md +1 -2
- examples/rl/task_app/math_single_step.py +2 -2
- examples/run_crafter_demo.sh +2 -2
- examples/sft/README.md +1 -1
- examples/sft/configs/crafter_fft_qwen0p6b.toml +4 -1
- examples/sft/configs/crafter_lora_qwen0p6b.toml +4 -1
- examples/swe/task_app/README.md +32 -2
- examples/swe/task_app/grpo_swe_mini.py +4 -0
- examples/swe/task_app/hosted/envs/crafter/react_agent.py +1 -1
- examples/swe/task_app/hosted/envs/mini_swe/environment.py +37 -10
- examples/swe/task_app/hosted/inference/openai_client.py +4 -38
- examples/swe/task_app/hosted/policy_routes.py +17 -0
- examples/swe/task_app/hosted/rollout.py +4 -2
- examples/swe/task_app/morph_backend.py +178 -0
- examples/task_apps/banking77/__init__.py +6 -0
- examples/task_apps/banking77/banking77_task_app.py +841 -0
- examples/task_apps/banking77/deploy_wrapper.py +46 -0
- examples/task_apps/crafter/CREATE_SFT_DATASET.md +4 -0
- examples/task_apps/crafter/FILTER_COMMAND_STATUS.md +4 -0
- examples/task_apps/crafter/FILTER_COMMAND_SUCCESS.md +4 -0
- examples/task_apps/crafter/task_app/README.md +1 -1
- examples/task_apps/crafter/task_app/grpo_crafter.py +90 -5
- examples/task_apps/crafter/task_app/grpo_crafter_task_app.py +1 -1
- examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/policy.py +4 -26
- examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/react_agent.py +1 -2
- examples/task_apps/crafter/task_app/synth_envs_hosted/hosted_app.py +49 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/inference/openai_client.py +372 -107
- examples/task_apps/crafter/task_app/synth_envs_hosted/policy_routes.py +81 -12
- examples/task_apps/crafter/task_app/synth_envs_hosted/rollout.py +82 -11
- examples/task_apps/crafter/task_app/synth_envs_hosted/utils.py +194 -1
- examples/task_apps/enron/task_app/grpo_enron_task_app.py +1 -1
- examples/task_apps/gepa_benchmarks/__init__.py +7 -0
- examples/task_apps/gepa_benchmarks/common.py +260 -0
- examples/task_apps/gepa_benchmarks/hotpotqa_task_app.py +507 -0
- examples/task_apps/gepa_benchmarks/hover_task_app.py +436 -0
- examples/task_apps/gepa_benchmarks/ifbench_task_app.py +563 -0
- examples/task_apps/gepa_benchmarks/pupa_task_app.py +460 -0
- examples/task_apps/math/README.md +1 -2
- examples/task_apps/pokemon_red/README.md +3 -4
- examples/task_apps/pokemon_red/README_IMAGE_ONLY_EVAL.md +4 -0
- examples/task_apps/pokemon_red/eval_image_only_gpt4o.toml +6 -5
- examples/task_apps/pokemon_red/eval_pokemon_red_policy.py +1 -2
- examples/task_apps/pokemon_red/task_app.py +288 -39
- examples/task_apps/sokoban/README.md +2 -3
- examples/task_apps/verilog/eval_groq_qwen32b.toml +12 -14
- examples/task_apps/verilog/task_app/grpo_verilog_task_app.py +1 -1
- examples/vlm/configs/crafter_vlm_gpt4o.toml +4 -1
- examples/warming_up_to_rl/configs/crafter_fft.toml +4 -1
- examples/warming_up_to_rl/configs/crafter_fft_4b.toml +0 -2
- examples/warming_up_to_rl/configs/rl_from_base_qwen4b.toml +3 -2
- examples/warming_up_to_rl/run_local_rollout_traced.py +1 -1
- examples/warming_up_to_rl/task_app/README.md +1 -1
- examples/warming_up_to_rl/task_app/grpo_crafter.py +185 -5
- examples/warming_up_to_rl/task_app/grpo_crafter_task_app.py +1 -1
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/policy.py +3 -27
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/react_agent.py +1 -1
- examples/warming_up_to_rl/task_app/synth_envs_hosted/hosted_app.py +49 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/inference/openai_client.py +156 -45
- examples/warming_up_to_rl/task_app/synth_envs_hosted/policy_routes.py +37 -4
- examples/warming_up_to_rl/task_app/synth_envs_hosted/rollout.py +33 -3
- examples/warming_up_to_rl/task_app/synth_envs_hosted/utils.py +67 -0
- examples/workflows/math_rl/configs/rl_from_base_qwen.toml +27 -0
- examples/workflows/math_rl/configs/rl_from_base_qwen17.toml +6 -0
- synth_ai/api/train/builders.py +99 -4
- synth_ai/api/train/cli.py +516 -26
- synth_ai/api/train/config_finder.py +13 -2
- synth_ai/api/train/configs/__init__.py +23 -2
- synth_ai/api/train/configs/prompt_learning.py +442 -0
- synth_ai/api/train/configs/rl.py +61 -7
- synth_ai/api/train/configs/sft.py +6 -2
- synth_ai/api/train/configs/shared.py +59 -2
- synth_ai/api/train/task_app.py +1 -1
- synth_ai/api/train/validators.py +277 -0
- synth_ai/auth/credentials.py +119 -0
- synth_ai/baseline/__init__.py +25 -0
- synth_ai/baseline/config.py +209 -0
- synth_ai/baseline/discovery.py +214 -0
- synth_ai/baseline/execution.py +146 -0
- synth_ai/cli/__init__.py +94 -18
- synth_ai/cli/__main__.py +0 -0
- synth_ai/cli/claude.py +70 -0
- synth_ai/cli/codex.py +84 -0
- synth_ai/cli/commands/__init__.py +18 -0
- synth_ai/cli/commands/baseline/__init__.py +12 -0
- synth_ai/cli/commands/baseline/core.py +637 -0
- synth_ai/cli/commands/baseline/list.py +93 -0
- synth_ai/cli/commands/demo/__init__.py +6 -0
- synth_ai/cli/commands/demo/core.py +163 -0
- synth_ai/cli/commands/eval/__init__.py +19 -0
- synth_ai/cli/commands/eval/core.py +1112 -0
- synth_ai/cli/commands/eval/errors.py +81 -0
- synth_ai/cli/commands/eval/validation.py +133 -0
- synth_ai/cli/commands/filter/__init__.py +12 -0
- synth_ai/cli/commands/filter/core.py +424 -0
- synth_ai/cli/commands/filter/errors.py +55 -0
- synth_ai/cli/commands/filter/validation.py +77 -0
- synth_ai/cli/commands/help/__init__.py +177 -0
- synth_ai/cli/commands/help/core.py +72 -0
- synth_ai/cli/commands/smoke/__init__.py +7 -0
- synth_ai/cli/commands/smoke/core.py +1436 -0
- synth_ai/cli/commands/status/__init__.py +64 -0
- synth_ai/cli/commands/status/client.py +192 -0
- synth_ai/cli/commands/status/config.py +92 -0
- synth_ai/cli/commands/status/errors.py +20 -0
- synth_ai/cli/commands/status/formatters.py +164 -0
- synth_ai/cli/commands/status/subcommands/__init__.py +9 -0
- synth_ai/cli/commands/status/subcommands/files.py +79 -0
- synth_ai/cli/commands/status/subcommands/jobs.py +334 -0
- synth_ai/cli/commands/status/subcommands/models.py +79 -0
- synth_ai/cli/commands/status/subcommands/pricing.py +22 -0
- synth_ai/cli/commands/status/subcommands/runs.py +81 -0
- synth_ai/cli/commands/status/subcommands/summary.py +47 -0
- synth_ai/cli/commands/status/subcommands/usage.py +203 -0
- synth_ai/cli/commands/status/utils.py +114 -0
- synth_ai/cli/commands/train/__init__.py +53 -0
- synth_ai/cli/commands/train/core.py +21 -0
- synth_ai/cli/commands/train/errors.py +117 -0
- synth_ai/cli/commands/train/judge_schemas.py +200 -0
- synth_ai/cli/commands/train/judge_validation.py +305 -0
- synth_ai/cli/commands/train/validation.py +386 -0
- synth_ai/cli/demo.py +30 -158
- synth_ai/cli/deploy/__init__.py +43 -0
- synth_ai/cli/deploy.py +162 -0
- synth_ai/cli/eval/__init__.py +36 -0
- synth_ai/cli/eval/core.py +5 -0
- synth_ai/cli/eval/errors.py +31 -0
- synth_ai/cli/eval/validation.py +5 -0
- synth_ai/cli/filter/__init__.py +28 -0
- synth_ai/cli/filter/core.py +5 -0
- synth_ai/cli/filter/errors.py +23 -0
- synth_ai/cli/filter/validation.py +5 -0
- synth_ai/cli/legacy_root_backup.py +14 -8
- synth_ai/cli/modal_serve/__init__.py +12 -0
- synth_ai/cli/modal_serve/core.py +14 -0
- synth_ai/cli/modal_serve/errors.py +8 -0
- synth_ai/cli/modal_serve/validation.py +11 -0
- synth_ai/cli/opencode.py +107 -0
- synth_ai/cli/root.py +9 -5
- synth_ai/cli/serve/__init__.py +12 -0
- synth_ai/cli/serve/core.py +14 -0
- synth_ai/cli/serve/errors.py +8 -0
- synth_ai/cli/serve/validation.py +11 -0
- synth_ai/cli/setup.py +20 -265
- synth_ai/cli/status.py +7 -126
- synth_ai/cli/task_app_deploy.py +1 -10
- synth_ai/cli/task_app_modal_serve.py +4 -9
- synth_ai/cli/task_app_serve.py +4 -11
- synth_ai/cli/task_apps.py +51 -1480
- synth_ai/cli/train/__init__.py +12 -0
- synth_ai/cli/train/core.py +21 -0
- synth_ai/cli/train/errors.py +8 -0
- synth_ai/cli/train/validation.py +24 -0
- synth_ai/cli/train.py +1 -14
- synth_ai/demos/crafter/grpo_crafter_task_app.py +1 -1
- synth_ai/demos/demo_task_apps/crafter/grpo_crafter_task_app.py +1 -1
- synth_ai/environments/examples/crafter_classic/engine_deterministic_patch.py +7 -4
- synth_ai/environments/examples/crafter_classic/engine_serialization_patch_v3.py +9 -5
- synth_ai/environments/examples/crafter_classic/world_config_patch_simple.py +4 -3
- synth_ai/environments/examples/red/engine.py +33 -12
- synth_ai/environments/examples/red/engine_helpers/reward_components.py +151 -179
- synth_ai/environments/examples/red/environment.py +26 -0
- synth_ai/environments/examples/red/trace_hooks_v3.py +168 -0
- synth_ai/http.py +12 -0
- synth_ai/judge_schemas.py +10 -10
- synth_ai/learning/__init__.py +10 -0
- synth_ai/learning/prompt_learning_client.py +276 -0
- synth_ai/learning/prompt_learning_types.py +184 -0
- synth_ai/learning/rl/client.py +3 -1
- synth_ai/pricing/__init__.py +2 -0
- synth_ai/pricing/model_pricing.py +57 -0
- synth_ai/streaming/__init__.py +29 -0
- synth_ai/streaming/config.py +94 -0
- synth_ai/streaming/handlers.py +518 -0
- synth_ai/streaming/streamer.py +320 -0
- synth_ai/streaming/types.py +95 -0
- synth_ai/task/apps/__init__.py +1 -0
- synth_ai/task/config.py +2 -0
- synth_ai/task/tracing_utils.py +25 -25
- synth_ai/task/validators.py +45 -9
- synth_ai/task_app_cfgs.py +21 -0
- synth_ai/tracing_v3/config.py +162 -19
- synth_ai/tracing_v3/constants.py +1 -1
- synth_ai/tracing_v3/db_config.py +24 -38
- synth_ai/tracing_v3/migration_helper.py +1 -2
- synth_ai/tracing_v3/storage/config.py +47 -13
- synth_ai/tracing_v3/storage/factory.py +3 -3
- synth_ai/tracing_v3/turso/daemon.py +113 -11
- synth_ai/tracing_v3/turso/native_manager.py +92 -16
- synth_ai/types.py +8 -0
- synth_ai/urls.py +11 -0
- synth_ai/utils/__init__.py +30 -1
- synth_ai/utils/agents.py +74 -0
- synth_ai/utils/bin.py +39 -0
- synth_ai/utils/cli.py +149 -5
- synth_ai/utils/env.py +40 -33
- synth_ai/utils/http.py +4 -1
- synth_ai/utils/json.py +72 -0
- synth_ai/utils/modal.py +285 -3
- synth_ai/utils/paths.py +48 -0
- synth_ai/utils/uvicorn.py +113 -0
- {synth_ai-0.2.16.dist-info ā synth_ai-0.2.19.dist-info}/METADATA +109 -6
- {synth_ai-0.2.16.dist-info ā synth_ai-0.2.19.dist-info}/RECORD +291 -142
- examples/qwen_vl/configs/eval_qwen2vl_vision.toml +0 -44
- synth_ai/cli/tui.py +0 -62
- synth_ai/tui/__init__.py +0 -5
- synth_ai/tui/__main__.py +0 -13
- synth_ai/tui/cli/__init__.py +0 -1
- synth_ai/tui/cli/query_experiments.py +0 -164
- synth_ai/tui/cli/query_experiments_v3.py +0 -164
- synth_ai/tui/dashboard.py +0 -911
- {synth_ai-0.2.16.dist-info ā synth_ai-0.2.19.dist-info}/WHEEL +0 -0
- {synth_ai-0.2.16.dist-info ā synth_ai-0.2.19.dist-info}/entry_points.txt +0 -0
- {synth_ai-0.2.16.dist-info ā synth_ai-0.2.19.dist-info}/licenses/LICENSE +0 -0
- {synth_ai-0.2.16.dist-info ā synth_ai-0.2.19.dist-info}/top_level.txt +0 -0
|
@@ -7,7 +7,7 @@ from typing import Any
|
|
|
7
7
|
from pydantic import Field
|
|
8
8
|
|
|
9
9
|
from ..utils import load_toml
|
|
10
|
-
from .shared import AlgorithmConfig, ComputeConfig, ExtraModel
|
|
10
|
+
from .shared import AlgorithmConfig, ComputeConfig, ExtraModel, LoraConfig, PolicyConfig
|
|
11
11
|
|
|
12
12
|
|
|
13
13
|
class JobConfig(ExtraModel):
|
|
@@ -35,6 +35,7 @@ class TrainingConfig(ExtraModel):
|
|
|
35
35
|
mode: str | None = None
|
|
36
36
|
use_qlora: bool | None = None
|
|
37
37
|
validation: TrainingValidationConfig | None = None
|
|
38
|
+
lora: LoraConfig | None = None # NEW: nested LoRA config
|
|
38
39
|
|
|
39
40
|
|
|
40
41
|
class HyperparametersParallelism(ExtraModel):
|
|
@@ -65,10 +66,12 @@ class HyperparametersConfig(ExtraModel):
|
|
|
65
66
|
class SFTConfig(ExtraModel):
|
|
66
67
|
algorithm: AlgorithmConfig | None = None
|
|
67
68
|
job: JobConfig
|
|
69
|
+
policy: PolicyConfig | None = None # NEW: unified policy section
|
|
68
70
|
compute: ComputeConfig | None = None
|
|
69
71
|
data: SFTDataConfig | None = None
|
|
70
72
|
training: TrainingConfig | None = None
|
|
71
73
|
hyperparameters: HyperparametersConfig = Field(default_factory=HyperparametersConfig)
|
|
74
|
+
lora: dict[str, Any] | None = None # DEPRECATED: use training.lora instead
|
|
72
75
|
tags: dict[str, Any] | None = None
|
|
73
76
|
|
|
74
77
|
def to_dict(self) -> dict[str, Any]:
|
|
@@ -76,7 +79,8 @@ class SFTConfig(ExtraModel):
|
|
|
76
79
|
|
|
77
80
|
@classmethod
|
|
78
81
|
def from_mapping(cls, data: Mapping[str, Any]) -> SFTConfig:
|
|
79
|
-
|
|
82
|
+
"""Load SFT config from dict/TOML mapping."""
|
|
83
|
+
return cls.model_validate(data)
|
|
80
84
|
|
|
81
85
|
@classmethod
|
|
82
86
|
def from_path(cls, path: Path) -> SFTConfig:
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
from __future__ import annotations
|
|
2
2
|
|
|
3
|
-
from pydantic import BaseModel, ConfigDict
|
|
3
|
+
from pydantic import BaseModel, ConfigDict, model_validator
|
|
4
4
|
|
|
5
5
|
|
|
6
6
|
class ExtraModel(BaseModel):
|
|
@@ -15,10 +15,67 @@ class AlgorithmConfig(ExtraModel):
|
|
|
15
15
|
variety: str
|
|
16
16
|
|
|
17
17
|
|
|
18
|
+
class TopologyConfig(ExtraModel):
|
|
19
|
+
"""Compute topology configuration - how GPUs are distributed across processes."""
|
|
20
|
+
type: str | None = None # e.g., "single_node_split"
|
|
21
|
+
gpus_for_vllm: int | None = None
|
|
22
|
+
gpus_for_training: int | None = None
|
|
23
|
+
gpus_for_ref: int | None = None
|
|
24
|
+
tensor_parallel: int | None = None
|
|
25
|
+
reference_placement: str | None = None # NEW: e.g., "none", "shared", "dedicated"
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class LoraConfig(ExtraModel):
|
|
29
|
+
"""LoRA (Low-Rank Adaptation) training configuration."""
|
|
30
|
+
r: int | None = None # Rank
|
|
31
|
+
alpha: int | None = None
|
|
32
|
+
dropout: float | None = None
|
|
33
|
+
target_modules: list[str] | None = None
|
|
34
|
+
|
|
35
|
+
|
|
18
36
|
class ComputeConfig(ExtraModel):
|
|
19
37
|
gpu_type: str
|
|
20
38
|
gpu_count: int
|
|
21
39
|
nodes: int | None = None
|
|
40
|
+
topology: TopologyConfig | None = None # NEW: nested topology
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
class PolicyConfig(ExtraModel):
|
|
44
|
+
"""Unified policy configuration for both SFT and RL.
|
|
45
|
+
|
|
46
|
+
This is the SINGLE SOURCE OF TRUTH for:
|
|
47
|
+
- What model to use (model_name or source)
|
|
48
|
+
- How to sample from it (temperature, max_tokens, etc.)
|
|
49
|
+
- How to train it (trainer_mode, label)
|
|
50
|
+
"""
|
|
51
|
+
|
|
52
|
+
# Model specification (exactly one required)
|
|
53
|
+
model_name: str | None = None # e.g., "Qwen/Qwen3-4B"
|
|
54
|
+
source: str | None = None # e.g., "ft:abc123" for checkpoints
|
|
55
|
+
|
|
56
|
+
# Sampling parameters (with sensible defaults)
|
|
57
|
+
max_tokens: int = 512
|
|
58
|
+
temperature: float = 0.7
|
|
59
|
+
top_p: float = 0.95
|
|
60
|
+
top_k: int | None = None
|
|
61
|
+
repetition_penalty: float = 1.0
|
|
62
|
+
stop_sequences: list[str] | None = None
|
|
63
|
+
|
|
64
|
+
# Training-specific
|
|
65
|
+
trainer_mode: str # "lora", "full", "qlora"
|
|
66
|
+
label: str # Model identifier/name
|
|
67
|
+
|
|
68
|
+
# Optional - for distributed inference
|
|
69
|
+
inference_url: str | None = None
|
|
70
|
+
|
|
71
|
+
@model_validator(mode="after")
|
|
72
|
+
def _ensure_exactly_one_source(self) -> PolicyConfig:
|
|
73
|
+
"""Ensure exactly one of model_name or source is set."""
|
|
74
|
+
if not (bool(self.model_name) ^ bool(self.source)):
|
|
75
|
+
raise ValueError(
|
|
76
|
+
"Must set exactly one: [policy].model_name OR [policy].source"
|
|
77
|
+
)
|
|
78
|
+
return self
|
|
22
79
|
|
|
23
80
|
|
|
24
|
-
__all__ = ["ExtraModel", "AlgorithmConfig", "ComputeConfig"]
|
|
81
|
+
__all__ = ["ExtraModel", "AlgorithmConfig", "ComputeConfig", "PolicyConfig", "TopologyConfig", "LoraConfig"]
|
synth_ai/api/train/task_app.py
CHANGED
|
@@ -38,7 +38,7 @@ def _health_response_ok(resp: requests.Response | None) -> tuple[bool, str]:
|
|
|
38
38
|
return False, ""
|
|
39
39
|
|
|
40
40
|
|
|
41
|
-
def check_task_app_health(base_url: str, api_key: str, *, timeout: float =
|
|
41
|
+
def check_task_app_health(base_url: str, api_key: str, *, timeout: float = 30.0) -> TaskAppHealth:
|
|
42
42
|
# Send ALL known environment keys so the server can authorize any valid one
|
|
43
43
|
import os
|
|
44
44
|
|
|
@@ -0,0 +1,277 @@
|
|
|
1
|
+
"""SDK-side validation for training configs - catch errors BEFORE sending to backend."""
|
|
2
|
+
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
from typing import Any
|
|
5
|
+
|
|
6
|
+
import click
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class ConfigValidationError(Exception):
|
|
10
|
+
"""Raised when a training config is invalid."""
|
|
11
|
+
pass
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def validate_prompt_learning_config(config_data: dict[str, Any], config_path: Path) -> None:
|
|
15
|
+
"""
|
|
16
|
+
Validate prompt learning config BEFORE sending to backend.
|
|
17
|
+
|
|
18
|
+
This catches common errors early with clear messages instead of cryptic backend errors.
|
|
19
|
+
|
|
20
|
+
Args:
|
|
21
|
+
config_data: Parsed TOML/JSON config
|
|
22
|
+
config_path: Path to config file (for error messages)
|
|
23
|
+
|
|
24
|
+
Raises:
|
|
25
|
+
ConfigValidationError: If config is invalid
|
|
26
|
+
click.ClickException: If validation fails (for CLI)
|
|
27
|
+
"""
|
|
28
|
+
errors: list[str] = []
|
|
29
|
+
|
|
30
|
+
# Check for prompt_learning section
|
|
31
|
+
pl_section = config_data.get("prompt_learning")
|
|
32
|
+
if not pl_section:
|
|
33
|
+
errors.append(
|
|
34
|
+
"Missing [prompt_learning] section in config. "
|
|
35
|
+
"Expected: [prompt_learning] with algorithm, task_app_url, etc."
|
|
36
|
+
)
|
|
37
|
+
_raise_validation_errors(errors, config_path)
|
|
38
|
+
return
|
|
39
|
+
|
|
40
|
+
if not isinstance(pl_section, dict):
|
|
41
|
+
errors.append(
|
|
42
|
+
f"[prompt_learning] must be a table/dict, got {type(pl_section).__name__}"
|
|
43
|
+
)
|
|
44
|
+
_raise_validation_errors(errors, config_path)
|
|
45
|
+
return
|
|
46
|
+
|
|
47
|
+
# CRITICAL: Validate algorithm field
|
|
48
|
+
algorithm = pl_section.get("algorithm")
|
|
49
|
+
if not algorithm:
|
|
50
|
+
errors.append(
|
|
51
|
+
"Missing required field: prompt_learning.algorithm\n"
|
|
52
|
+
" Must be one of: 'gepa', 'mipro'\n"
|
|
53
|
+
" Example:\n"
|
|
54
|
+
" [prompt_learning]\n"
|
|
55
|
+
" algorithm = \"gepa\""
|
|
56
|
+
)
|
|
57
|
+
elif algorithm not in ("gepa", "mipro"):
|
|
58
|
+
errors.append(
|
|
59
|
+
f"Invalid algorithm: '{algorithm}'\n"
|
|
60
|
+
f" Must be one of: 'gepa', 'mipro' (Note: MIPRO not yet implemented)\n"
|
|
61
|
+
f" Got: '{algorithm}'"
|
|
62
|
+
)
|
|
63
|
+
|
|
64
|
+
# Validate task_app_url
|
|
65
|
+
task_app_url = pl_section.get("task_app_url")
|
|
66
|
+
if not task_app_url:
|
|
67
|
+
errors.append(
|
|
68
|
+
"Missing required field: prompt_learning.task_app_url\n"
|
|
69
|
+
" Example:\n"
|
|
70
|
+
" task_app_url = \"http://127.0.0.1:8102\""
|
|
71
|
+
)
|
|
72
|
+
elif not isinstance(task_app_url, str):
|
|
73
|
+
errors.append(
|
|
74
|
+
f"task_app_url must be a string, got {type(task_app_url).__name__}"
|
|
75
|
+
)
|
|
76
|
+
elif not task_app_url.startswith(("http://", "https://")):
|
|
77
|
+
errors.append(
|
|
78
|
+
f"task_app_url must start with http:// or https://, got: '{task_app_url}'"
|
|
79
|
+
)
|
|
80
|
+
|
|
81
|
+
# Validate initial_prompt if present
|
|
82
|
+
initial_prompt = pl_section.get("initial_prompt")
|
|
83
|
+
if initial_prompt:
|
|
84
|
+
if not isinstance(initial_prompt, dict):
|
|
85
|
+
errors.append(
|
|
86
|
+
f"prompt_learning.initial_prompt must be a table/dict, got {type(initial_prompt).__name__}"
|
|
87
|
+
)
|
|
88
|
+
else:
|
|
89
|
+
# Validate messages array
|
|
90
|
+
messages = initial_prompt.get("messages")
|
|
91
|
+
if messages is not None:
|
|
92
|
+
if not isinstance(messages, list):
|
|
93
|
+
errors.append(
|
|
94
|
+
f"prompt_learning.initial_prompt.messages must be an array, got {type(messages).__name__}"
|
|
95
|
+
)
|
|
96
|
+
elif len(messages) == 0:
|
|
97
|
+
errors.append(
|
|
98
|
+
"prompt_learning.initial_prompt.messages is empty (must have at least one message)"
|
|
99
|
+
)
|
|
100
|
+
|
|
101
|
+
# Validate policy config
|
|
102
|
+
policy = pl_section.get("policy")
|
|
103
|
+
if not policy or not isinstance(policy, dict):
|
|
104
|
+
errors.append("Missing [prompt_learning.policy] section or not a table")
|
|
105
|
+
else:
|
|
106
|
+
# Enforce inference_mode
|
|
107
|
+
mode = str(policy.get("inference_mode", "")).strip().lower()
|
|
108
|
+
if not mode:
|
|
109
|
+
errors.append("Missing required field: prompt_learning.policy.inference_mode (must be 'synth_hosted')")
|
|
110
|
+
elif mode != "synth_hosted":
|
|
111
|
+
errors.append("prompt_learning.policy.inference_mode must be 'synth_hosted' (bring_your_own unsupported)")
|
|
112
|
+
# Required fields for synth_hosted
|
|
113
|
+
provider = (policy.get("provider") or "").strip().lower()
|
|
114
|
+
model = (policy.get("model") or "").strip()
|
|
115
|
+
inference_url = (policy.get("inference_url") or "").strip()
|
|
116
|
+
if not provider:
|
|
117
|
+
errors.append("Missing required field: prompt_learning.policy.provider")
|
|
118
|
+
if not model:
|
|
119
|
+
errors.append("Missing required field: prompt_learning.policy.model")
|
|
120
|
+
if not inference_url:
|
|
121
|
+
errors.append("Missing required field: prompt_learning.policy.inference_url")
|
|
122
|
+
elif not isinstance(inference_url, str) or not inference_url.startswith(("http://", "https://")):
|
|
123
|
+
errors.append(f"policy.inference_url must start with http:// or https://, got: '{inference_url}'")
|
|
124
|
+
|
|
125
|
+
# Validate algorithm-specific config
|
|
126
|
+
if algorithm == "gepa":
|
|
127
|
+
gepa_config = pl_section.get("gepa")
|
|
128
|
+
if not gepa_config or not isinstance(gepa_config, dict):
|
|
129
|
+
errors.append("Missing [prompt_learning.gepa] section for GEPA algorithm")
|
|
130
|
+
else:
|
|
131
|
+
# Numeric sanity checks
|
|
132
|
+
def _pos_int(name: str) -> None:
|
|
133
|
+
val = gepa_config.get(name)
|
|
134
|
+
if val is not None:
|
|
135
|
+
try:
|
|
136
|
+
ival = int(val)
|
|
137
|
+
if ival <= 0:
|
|
138
|
+
errors.append(f"prompt_learning.gepa.{name} must be > 0")
|
|
139
|
+
except Exception:
|
|
140
|
+
errors.append(f"prompt_learning.gepa.{name} must be an integer")
|
|
141
|
+
for fld in ("initial_population_size", "num_generations", "children_per_generation", "max_concurrent_rollouts"):
|
|
142
|
+
_pos_int(fld)
|
|
143
|
+
# Budget cap
|
|
144
|
+
if "max_spend_usd" in gepa_config and gepa_config.get("max_spend_usd") is not None:
|
|
145
|
+
try:
|
|
146
|
+
f = float(gepa_config.get("max_spend_usd"))
|
|
147
|
+
if f <= 0:
|
|
148
|
+
errors.append("prompt_learning.gepa.max_spend_usd must be > 0 when provided")
|
|
149
|
+
except Exception:
|
|
150
|
+
errors.append("prompt_learning.gepa.max_spend_usd must be numeric")
|
|
151
|
+
|
|
152
|
+
elif algorithm == "mipro":
|
|
153
|
+
# MIPRO is not yet implemented in synth-ai
|
|
154
|
+
errors.append(
|
|
155
|
+
"MIPRO algorithm is not yet implemented in synth-ai.\n"
|
|
156
|
+
" Please use 'gepa' algorithm for prompt optimization.\n"
|
|
157
|
+
" MIPRO support is planned for a future release.\n"
|
|
158
|
+
" Example:\n"
|
|
159
|
+
" [prompt_learning]\n"
|
|
160
|
+
" algorithm = \"gepa\"\n"
|
|
161
|
+
" [prompt_learning.gepa]\n"
|
|
162
|
+
" # ... gepa configuration"
|
|
163
|
+
)
|
|
164
|
+
|
|
165
|
+
# Raise all errors at once for better UX
|
|
166
|
+
if errors:
|
|
167
|
+
_raise_validation_errors(errors, config_path)
|
|
168
|
+
|
|
169
|
+
|
|
170
|
+
def _raise_validation_errors(errors: list[str], config_path: Path) -> None:
|
|
171
|
+
"""Format and raise validation errors."""
|
|
172
|
+
error_msg = (
|
|
173
|
+
f"\nā Invalid prompt learning config: {config_path}\n\n"
|
|
174
|
+
f"Found {len(errors)} error(s):\n\n"
|
|
175
|
+
)
|
|
176
|
+
|
|
177
|
+
for i, error in enumerate(errors, 1):
|
|
178
|
+
# Indent multi-line errors
|
|
179
|
+
indented_error = "\n ".join(error.split("\n"))
|
|
180
|
+
error_msg += f"{i}. {indented_error}\n\n"
|
|
181
|
+
|
|
182
|
+
error_msg += (
|
|
183
|
+
"š See example configs:\n"
|
|
184
|
+
" - examples/blog_posts/gepa/configs/banking77_gepa_local.toml\n"
|
|
185
|
+
" - examples/blog_posts/mipro/configs/banking77_mipro_local.toml\n"
|
|
186
|
+
)
|
|
187
|
+
|
|
188
|
+
raise click.ClickException(error_msg)
|
|
189
|
+
|
|
190
|
+
|
|
191
|
+
def validate_rl_config(config_data: dict[str, Any], config_path: Path) -> None:
|
|
192
|
+
"""
|
|
193
|
+
Validate RL config BEFORE sending to backend.
|
|
194
|
+
|
|
195
|
+
Args:
|
|
196
|
+
config_data: Parsed TOML/JSON config
|
|
197
|
+
config_path: Path to config file (for error messages)
|
|
198
|
+
|
|
199
|
+
Raises:
|
|
200
|
+
ConfigValidationError: If config is invalid
|
|
201
|
+
click.ClickException: If validation fails (for CLI)
|
|
202
|
+
"""
|
|
203
|
+
errors: list[str] = []
|
|
204
|
+
|
|
205
|
+
# Check for rl section
|
|
206
|
+
rl_section = config_data.get("rl") or config_data.get("online_rl")
|
|
207
|
+
if not rl_section:
|
|
208
|
+
errors.append(
|
|
209
|
+
"Missing [rl] or [online_rl] section in config"
|
|
210
|
+
)
|
|
211
|
+
_raise_validation_errors(errors, config_path)
|
|
212
|
+
return
|
|
213
|
+
|
|
214
|
+
# Validate algorithm
|
|
215
|
+
algorithm = rl_section.get("algorithm")
|
|
216
|
+
if not algorithm:
|
|
217
|
+
errors.append(
|
|
218
|
+
"Missing required field: rl.algorithm\n"
|
|
219
|
+
" Must be one of: 'grpo', 'ppo', etc."
|
|
220
|
+
)
|
|
221
|
+
|
|
222
|
+
# Validate task_url
|
|
223
|
+
task_url = rl_section.get("task_url")
|
|
224
|
+
if not task_url:
|
|
225
|
+
errors.append(
|
|
226
|
+
"Missing required field: rl.task_url"
|
|
227
|
+
)
|
|
228
|
+
elif not isinstance(task_url, str):
|
|
229
|
+
errors.append(
|
|
230
|
+
f"task_url must be a string, got {type(task_url).__name__}"
|
|
231
|
+
)
|
|
232
|
+
|
|
233
|
+
if errors:
|
|
234
|
+
_raise_validation_errors(errors, config_path)
|
|
235
|
+
|
|
236
|
+
|
|
237
|
+
def validate_sft_config(config_data: dict[str, Any], config_path: Path) -> None:
    """Validate an SFT config locally before it is submitted to the backend.

    Args:
        config_data: Parsed TOML/JSON config
        config_path: Path to config file (for error messages)

    Raises:
        ConfigValidationError: If config is invalid
        click.ClickException: If validation fails (for CLI)
    """
    problems: list[str] = []

    # The [sft] table is required; without it nothing else can be checked.
    sft = config_data.get("sft")
    if not sft:
        problems.append(
            "Missing [sft] section in config"
        )
        _raise_validation_errors(problems, config_path)
        return

    # A base model identifier is mandatory for any SFT run.
    if not sft.get("model"):
        problems.append(
            "Missing required field: sft.model"
        )

    if problems:
        _raise_validation_errors(problems, config_path)
|
|
269
|
+
|
|
270
|
+
|
|
271
|
+
# Public API of this module: the error type plus the per-job-type validators.
__all__ = [
    "ConfigValidationError",
    "validate_prompt_learning_config",
    "validate_rl_config",
    "validate_sft_config",
]
|
|
277
|
+
|
|
@@ -0,0 +1,119 @@
|
|
|
1
|
+
import contextlib
|
|
2
|
+
import os
|
|
3
|
+
import time
|
|
4
|
+
import webbrowser
|
|
5
|
+
|
|
6
|
+
import requests
|
|
7
|
+
from requests import RequestException
|
|
8
|
+
from synth_ai.utils.env import resolve_env_var, write_env_var_to_dotenv, write_env_var_to_json
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def fetch_credentials_from_web_browser_session(
    browser: bool = True,
    prod: bool = True,
) -> None:
    """Fetch Synth credentials via a browser-based device handshake.

    Initializes a device-code handshake with the Synth web app, opens the
    user's browser to the verification page, polls the token endpoint until
    credentials are issued, then loads the keys into the process environment
    and persists them to ``.env`` and ``~/.synth-ai/config.json``.

    Args:
        browser: When True, run the browser handshake and persist the fetched
            keys. When False, no handshake is performed and nothing is
            written (the function is effectively a no-op).
        prod: When True, target the production site; otherwise target the
            local dev server on port 3000.

    Raises:
        RuntimeError: If a handshake endpoint is unreachable, returns a
            non-200 init status, malformed JSON, or an expired/revoked code.
        TimeoutError: If the handshake does not complete before expiry.
    """
    synth_api_key = ''
    env_api_key = ''
    org_name = ''

    if browser:
        origin = "https://www.usesynth.ai" if prod else "http://localhost:3000"
        init_url = f"{origin}/api/sdk/handshake/init"
        token_url = f"{origin}/api/sdk/handshake/token"

        print(f"\nš Connecting to {origin} to fetch your Synth credentials")

        # 1. Initialize browser handshake
        try:
            init_res = requests.post(init_url, timeout=10)
        except RequestException as exc:
            raise RuntimeError(f"Failed to reach handshake init endpoint: {exc}") from exc

        if init_res.status_code != 200:
            body = init_res.text.strip()
            raise RuntimeError(f"Handshake init failed ({init_res.status_code}): {body or 'no response body'}")

        try:
            init_data = init_res.json()
        except ValueError as exc:
            raise RuntimeError("Handshake init returned malformed JSON.") from exc

        device_code = str(init_data.get("device_code") or "").strip()
        verification_uri = str(init_data.get("verification_uri") or "").strip()
        if not device_code or not verification_uri:
            raise RuntimeError("Handshake init response missing device_code or verification_uri.")

        # Poll window: default to 10 minutes whether the key is absent or
        # unparseable. (Fix: the except branch previously fell back to 120s,
        # inconsistent with the `or 600` default for a missing key.)
        try:
            expires_in = int(init_data.get("expires_in") or 600)
        except (TypeError, ValueError):
            expires_in = 600
        try:
            interval = max(int(init_data.get("interval") or 3), 1)
        except (TypeError, ValueError):
            interval = 3

        # 2. Open browser to verification URL (best effort; the user can
        # always open the URL manually).
        with contextlib.suppress(Exception):
            webbrowser.open(verification_uri)

        deadline = time.time() + expires_in
        handshake_data = None

        # 3. Poll handshake token endpoint until credentials arrive or we
        # run out of time.
        while time.time() <= deadline:
            try:
                handshake_res = requests.post(
                    token_url,
                    json={"device_code": device_code},
                    timeout=10,
                )
            except RequestException:
                # Transient network failure: wait and retry until deadline.
                time.sleep(interval)
                continue

            if handshake_res.status_code == 200:
                try:
                    handshake_data = handshake_res.json()
                except ValueError as exc:
                    raise RuntimeError("Handshake token returned malformed JSON.") from exc
                break

            if handshake_res.status_code in (404, 410):
                raise RuntimeError("Handshake failed: device code expired or was revoked.")

            time.sleep(interval)

        if handshake_data is None:
            raise TimeoutError("Handshake timed out before credentials were returned.")

        # 4. Extract credentials from handshake payload
        org = handshake_data.get("org")
        if not isinstance(org, dict):
            org = {}
        org_name = str(org.get("name") or "your organization").strip()

        credentials = handshake_data.get("keys")
        if not isinstance(credentials, dict):
            credentials = {}

        synth_api_key = str(credentials.get("synth") or "").strip()
        env_api_key = str(credentials.get("rl_env") or "").strip()

        print(f"\nā… Connected to {org_name}")

    # Load credentials into the process environment; resolve_env_var is
    # presumably a round-trip/normalization step — TODO confirm its contract.
    if synth_api_key:
        print("\nLoading SYNTH_API_KEY into process environment")
        os.environ["SYNTH_API_KEY"] = synth_api_key
        synth_api_key = resolve_env_var("SYNTH_API_KEY")
    if env_api_key:
        print("\nLoading ENVIRONMENT_API_KEY into process environment")
        os.environ["ENVIRONMENT_API_KEY"] = env_api_key
        env_api_key = resolve_env_var("ENVIRONMENT_API_KEY")

    # Persist both keys to .env and ~/.synth-ai/config.json. NOTE(review):
    # this writes even when a key is empty — confirm that is intended.
    if browser:
        print('')
        write_env_var_to_json("SYNTH_API_KEY", synth_api_key, "~/.synth-ai/config.json")
        write_env_var_to_dotenv("SYNTH_API_KEY", synth_api_key)
        write_env_var_to_json("ENVIRONMENT_API_KEY", env_api_key, "~/.synth-ai/config.json")
        write_env_var_to_dotenv("ENVIRONMENT_API_KEY", env_api_key)
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
"""Baseline file system for self-contained task evaluation.
|
|
2
|
+
|
|
3
|
+
This package provides abstractions for defining and executing baseline evaluations
|
|
4
|
+
without requiring deployed task apps. Supports both class-based and function-based
|
|
5
|
+
task runners with first-class train/val/test split support.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
from synth_ai.baseline.config import (
|
|
11
|
+
BaselineConfig,
|
|
12
|
+
BaselineResults,
|
|
13
|
+
BaselineTaskRunner,
|
|
14
|
+
DataSplit,
|
|
15
|
+
TaskResult,
|
|
16
|
+
)
|
|
17
|
+
|
|
18
|
+
__all__ = [
|
|
19
|
+
"BaselineConfig",
|
|
20
|
+
"BaselineTaskRunner",
|
|
21
|
+
"DataSplit",
|
|
22
|
+
"TaskResult",
|
|
23
|
+
"BaselineResults",
|
|
24
|
+
]
|
|
25
|
+
|