synth-ai 0.2.6.dev1__py3-none-any.whl → 0.4.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- synth_ai/__init__.py +44 -24
- synth_ai/__main__.py +30 -3
- synth_ai/cli/__init__.py +103 -48
- synth_ai/cli/__main__.py +42 -0
- synth_ai/cli/_internal/__init__.py +5 -0
- synth_ai/cli/_internal/modal_wrapper.py +31 -0
- synth_ai/cli/_internal/storage.py +20 -0
- synth_ai/cli/_internal/typer_patch.py +47 -0
- synth_ai/cli/_internal/validate_task_app.py +29 -0
- synth_ai/cli/agents/__init__.py +17 -0
- synth_ai/cli/agents/claude.py +77 -0
- synth_ai/cli/agents/codex.py +265 -0
- synth_ai/cli/agents/opencode.py +253 -0
- synth_ai/cli/commands/__init__.py +18 -0
- synth_ai/cli/commands/artifacts/__init__.py +13 -0
- synth_ai/cli/commands/artifacts/client.py +119 -0
- synth_ai/cli/commands/artifacts/config.py +57 -0
- synth_ai/cli/commands/artifacts/core.py +24 -0
- synth_ai/cli/commands/artifacts/download.py +188 -0
- synth_ai/cli/commands/artifacts/export.py +186 -0
- synth_ai/cli/commands/artifacts/list.py +156 -0
- synth_ai/cli/commands/artifacts/parsing.py +250 -0
- synth_ai/cli/commands/artifacts/show.py +336 -0
- synth_ai/cli/commands/demo/__init__.py +3 -0
- synth_ai/cli/commands/demo/core.py +153 -0
- synth_ai/cli/commands/eval/__init__.py +10 -0
- synth_ai/cli/commands/eval/config.py +338 -0
- synth_ai/cli/commands/eval/core.py +256 -0
- synth_ai/cli/commands/eval/runner.py +704 -0
- synth_ai/cli/commands/eval/validation.py +60 -0
- synth_ai/cli/commands/filter/__init__.py +12 -0
- synth_ai/cli/commands/filter/core.py +424 -0
- synth_ai/cli/commands/filter/errors.py +55 -0
- synth_ai/cli/commands/filter/validation.py +77 -0
- synth_ai/cli/commands/help/__init__.py +185 -0
- synth_ai/cli/commands/help/core.py +72 -0
- synth_ai/cli/commands/scan/__init__.py +19 -0
- synth_ai/cli/commands/scan/cloudflare_scanner.py +403 -0
- synth_ai/cli/commands/scan/core.py +344 -0
- synth_ai/cli/commands/scan/health_checker.py +242 -0
- synth_ai/cli/commands/scan/local_scanner.py +278 -0
- synth_ai/cli/commands/scan/models.py +83 -0
- synth_ai/cli/commands/smoke/__init__.py +7 -0
- synth_ai/cli/commands/smoke/core.py +1428 -0
- synth_ai/cli/commands/status/__init__.py +3 -0
- synth_ai/cli/commands/status/client.py +91 -0
- synth_ai/cli/commands/status/config.py +12 -0
- synth_ai/cli/commands/status/errors.py +11 -0
- synth_ai/cli/commands/status/subcommands/__init__.py +3 -0
- synth_ai/cli/commands/status/subcommands/config.py +13 -0
- synth_ai/cli/commands/status/subcommands/files.py +34 -0
- synth_ai/cli/commands/status/subcommands/jobs.py +51 -0
- synth_ai/cli/commands/status/subcommands/models.py +35 -0
- synth_ai/cli/commands/status/subcommands/runs.py +34 -0
- synth_ai/cli/commands/status/subcommands/session.py +77 -0
- synth_ai/cli/commands/status/subcommands/summary.py +39 -0
- synth_ai/cli/commands/status/subcommands/utils.py +41 -0
- synth_ai/cli/commands/status/utils.py +23 -0
- synth_ai/cli/commands/train/__init__.py +53 -0
- synth_ai/cli/commands/train/core.py +22 -0
- synth_ai/cli/commands/train/errors.py +117 -0
- synth_ai/cli/commands/train/judge_schemas.py +201 -0
- synth_ai/cli/commands/train/judge_validation.py +305 -0
- synth_ai/cli/commands/train/prompt_learning_validation.py +633 -0
- synth_ai/cli/commands/train/validation.py +392 -0
- synth_ai/cli/demo_apps/__init__.py +10 -0
- synth_ai/cli/demo_apps/core/__init__.py +28 -0
- synth_ai/cli/demo_apps/core/cli.py +1735 -0
- synth_ai/cli/demo_apps/crafter/__init__.py +1 -0
- synth_ai/cli/demo_apps/crafter/crafter_fft_4b.toml +55 -0
- synth_ai/cli/demo_apps/crafter/grpo_crafter_task_app.py +186 -0
- synth_ai/cli/demo_apps/crafter/rl_from_base_qwen4b.toml +74 -0
- synth_ai/cli/demo_apps/demo_registry.py +176 -0
- synth_ai/cli/demo_apps/demo_task_apps/__init__.py +7 -0
- synth_ai/{demos → cli/demo_apps}/demo_task_apps/core.py +117 -51
- synth_ai/cli/demo_apps/demo_task_apps/crafter/__init__.py +1 -0
- synth_ai/cli/demo_apps/demo_task_apps/crafter/configs/crafter_fft_4b.toml +53 -0
- synth_ai/cli/demo_apps/demo_task_apps/crafter/configs/rl_from_base_qwen4b.toml +73 -0
- synth_ai/cli/demo_apps/demo_task_apps/crafter/grpo_crafter_task_app.py +185 -0
- synth_ai/cli/demo_apps/demo_task_apps/math/_common.py +16 -0
- synth_ai/{demos → cli/demo_apps}/demo_task_apps/math/app.py +2 -1
- synth_ai/cli/demo_apps/demo_task_apps/math/config.toml +73 -0
- synth_ai/{demos → cli/demo_apps}/demo_task_apps/math/deploy_modal.py +3 -6
- synth_ai/cli/demo_apps/demo_task_apps/math/modal_task_app.py +738 -0
- synth_ai/cli/demo_apps/demo_task_apps/math/task_app_entry.py +39 -0
- synth_ai/cli/demo_apps/math/__init__.py +1 -0
- synth_ai/cli/demo_apps/math/_common.py +16 -0
- synth_ai/cli/demo_apps/math/app.py +38 -0
- synth_ai/cli/demo_apps/math/config.toml +75 -0
- synth_ai/cli/demo_apps/math/deploy_modal.py +54 -0
- synth_ai/cli/demo_apps/math/modal_task_app.py +698 -0
- synth_ai/cli/demo_apps/math/task_app_entry.py +53 -0
- synth_ai/cli/demo_apps/mipro/main.py +271 -0
- synth_ai/cli/demo_apps/mipro/task_app.py +922 -0
- synth_ai/cli/demo_apps/mipro/train_cfg.toml +92 -0
- synth_ai/cli/demos/__init__.py +12 -0
- synth_ai/cli/demos/demo.py +32 -0
- synth_ai/cli/demos/rl_demo.py +254 -0
- synth_ai/cli/deploy.py +216 -0
- synth_ai/cli/infra/__init__.py +14 -0
- synth_ai/cli/{balance.py → infra/balance.py} +21 -3
- synth_ai/cli/infra/mcp.py +35 -0
- synth_ai/cli/infra/modal_app.py +36 -0
- synth_ai/cli/infra/setup.py +69 -0
- synth_ai/cli/infra/status.py +16 -0
- synth_ai/cli/infra/turso.py +77 -0
- synth_ai/cli/lib/__init__.py +10 -0
- synth_ai/cli/lib/agents.py +76 -0
- synth_ai/cli/lib/apps/modal_app.py +101 -0
- synth_ai/cli/lib/apps/task_app.py +642 -0
- synth_ai/cli/lib/bin.py +39 -0
- synth_ai/cli/lib/env.py +375 -0
- synth_ai/cli/lib/errors.py +85 -0
- synth_ai/cli/lib/modal.py +315 -0
- synth_ai/cli/lib/plotting.py +126 -0
- synth_ai/cli/lib/prompt_args.py +39 -0
- synth_ai/cli/lib/prompts.py +284 -0
- synth_ai/cli/lib/sqld.py +122 -0
- synth_ai/cli/lib/task_app_discovery.py +884 -0
- synth_ai/cli/lib/task_app_env.py +295 -0
- synth_ai/cli/lib/train_cfgs.py +300 -0
- synth_ai/cli/lib/tunnel_records.py +207 -0
- synth_ai/cli/local/__init__.py +14 -0
- synth_ai/cli/local/experiment_queue/__init__.py +72 -0
- synth_ai/cli/local/experiment_queue/api_schemas.py +221 -0
- synth_ai/cli/local/experiment_queue/celery_app.py +208 -0
- synth_ai/cli/local/experiment_queue/config.py +128 -0
- synth_ai/cli/local/experiment_queue/config_utils.py +272 -0
- synth_ai/cli/local/experiment_queue/database.py +175 -0
- synth_ai/cli/local/experiment_queue/dispatcher.py +119 -0
- synth_ai/cli/local/experiment_queue/models.py +231 -0
- synth_ai/cli/local/experiment_queue/progress_info.py +160 -0
- synth_ai/cli/local/experiment_queue/results.py +373 -0
- synth_ai/cli/local/experiment_queue/schemas.py +131 -0
- synth_ai/cli/local/experiment_queue/service.py +344 -0
- synth_ai/cli/local/experiment_queue/status.py +372 -0
- synth_ai/cli/local/experiment_queue/status_tracker.py +360 -0
- synth_ai/cli/local/experiment_queue/tasks.py +1984 -0
- synth_ai/cli/local/experiment_queue/trace_storage.py +65 -0
- synth_ai/cli/local/experiment_queue/validation.py +157 -0
- synth_ai/cli/local/session/__init__.py +92 -0
- synth_ai/cli/local/session/client.py +383 -0
- synth_ai/cli/local/session/constants.py +63 -0
- synth_ai/cli/local/session/exceptions.py +105 -0
- synth_ai/cli/local/session/manager.py +139 -0
- synth_ai/cli/local/session/models.py +89 -0
- synth_ai/cli/local/session/query.py +110 -0
- synth_ai/cli/root.py +150 -102
- synth_ai/cli/task_apps/__init__.py +37 -0
- synth_ai/cli/task_apps/commands.py +3145 -0
- synth_ai/cli/task_apps/deploy.py +7 -0
- synth_ai/cli/task_apps/list.py +26 -0
- synth_ai/cli/task_apps/main.py +36 -0
- synth_ai/cli/task_apps/modal_serve.py +11 -0
- synth_ai/cli/task_apps/serve.py +11 -0
- synth_ai/cli/training/__init__.py +8 -0
- synth_ai/cli/training/train.py +5 -0
- synth_ai/cli/training/train_cfg.py +34 -0
- synth_ai/cli/{watch.py → training/watch.py} +13 -18
- synth_ai/cli/turso.py +52 -0
- synth_ai/cli/utils/__init__.py +8 -0
- synth_ai/cli/utils/experiments.py +235 -0
- synth_ai/cli/utils/queue.py +504 -0
- synth_ai/cli/{recent.py → utils/recent.py} +13 -7
- synth_ai/cli/{traces.py → utils/traces.py} +9 -5
- synth_ai/contracts/__init__.py +67 -0
- synth_ai/core/__init__.py +100 -0
- synth_ai/core/_utils/__init__.py +54 -0
- synth_ai/core/_utils/base_url.py +10 -0
- synth_ai/core/_utils/http.py +10 -0
- synth_ai/core/_utils/prompts.py +14 -0
- synth_ai/core/_utils/task_app_state.py +12 -0
- synth_ai/core/_utils/user_config.py +10 -0
- synth_ai/core/apps/common.py +116 -0
- synth_ai/core/auth.py +95 -0
- synth_ai/core/cfgs.py +240 -0
- synth_ai/core/config/__init__.py +16 -0
- synth_ai/core/config/base.py +168 -0
- synth_ai/core/config/resolver.py +89 -0
- synth_ai/core/env.py +231 -0
- synth_ai/core/errors.py +126 -0
- synth_ai/core/http.py +230 -0
- synth_ai/core/integrations/__init__.py +11 -0
- synth_ai/core/integrations/cloudflare.py +1710 -0
- synth_ai/core/integrations/mcp/__init__.py +6 -0
- synth_ai/core/integrations/mcp/__main__.py +8 -0
- synth_ai/core/integrations/mcp/claude.py +36 -0
- synth_ai/core/integrations/mcp/main.py +254 -0
- synth_ai/core/integrations/mcp/setup.py +100 -0
- synth_ai/core/integrations/modal.py +277 -0
- synth_ai/core/json.py +72 -0
- synth_ai/core/log_filter.py +99 -0
- synth_ai/core/logging.py +82 -0
- synth_ai/core/paths.py +107 -0
- synth_ai/core/pricing.py +109 -0
- synth_ai/core/process.py +233 -0
- synth_ai/core/ssl.py +25 -0
- synth_ai/core/storage/__init__.py +71 -0
- synth_ai/core/task_app_state.py +318 -0
- synth_ai/core/telemetry.py +282 -0
- synth_ai/{tracing_v3 → core/tracing_v3}/__init__.py +5 -1
- synth_ai/{tracing_v3 → core/tracing_v3}/abstractions.py +21 -4
- synth_ai/core/tracing_v3/config.py +229 -0
- synth_ai/core/tracing_v3/constants.py +21 -0
- synth_ai/{tracing_v3 → core/tracing_v3}/db_config.py +42 -29
- synth_ai/{tracing_v3 → core/tracing_v3}/decorators.py +80 -45
- synth_ai/{tracing_v3 → core/tracing_v3}/examples/basic_usage.py +15 -9
- synth_ai/{tracing_v3 → core/tracing_v3}/hooks.py +6 -4
- synth_ai/{tracing_v3 → core/tracing_v3}/llm_call_record_helpers.py +161 -61
- synth_ai/{tracing_v3 → core/tracing_v3}/migration_helper.py +1 -2
- synth_ai/{tracing_v3 → core/tracing_v3}/replica_sync.py +12 -7
- synth_ai/core/tracing_v3/serialization.py +130 -0
- synth_ai/{tracing_v3 → core/tracing_v3}/session_tracer.py +88 -21
- synth_ai/{tracing_v3 → core/tracing_v3}/storage/base.py +99 -12
- synth_ai/core/tracing_v3/storage/config.py +109 -0
- synth_ai/{tracing_v3 → core/tracing_v3}/storage/factory.py +11 -9
- synth_ai/{tracing_v3 → core/tracing_v3}/storage/utils.py +15 -11
- synth_ai/core/tracing_v3/trace_utils.py +326 -0
- synth_ai/core/tracing_v3/turso/__init__.py +12 -0
- synth_ai/core/tracing_v3/turso/daemon.py +278 -0
- synth_ai/{tracing_v3 → core/tracing_v3}/turso/models.py +7 -3
- synth_ai/core/tracing_v3/turso/native_manager.py +1385 -0
- synth_ai/{tracing_v3 → core/tracing_v3}/utils.py +5 -4
- synth_ai/core/urls.py +18 -0
- synth_ai/core/user_config.py +137 -0
- synth_ai/core/uvicorn.py +222 -0
- synth_ai/data/__init__.py +83 -0
- synth_ai/data/enums.py +123 -0
- synth_ai/data/rewards.py +152 -0
- synth_ai/data/traces.py +35 -0
- synth_ai/products/__init__.py +6 -0
- synth_ai/products/graph_evolve/__init__.py +46 -0
- synth_ai/products/graph_evolve/client.py +226 -0
- synth_ai/products/graph_evolve/config.py +591 -0
- synth_ai/products/graph_evolve/converters/__init__.py +42 -0
- synth_ai/products/graph_evolve/converters/openai_sft.py +484 -0
- synth_ai/products/graph_evolve/examples/hotpotqa/config.toml +109 -0
- synth_ai/products/graph_evolve/run.py +222 -0
- synth_ai/products/graph_gepa/__init__.py +23 -0
- synth_ai/products/graph_gepa/converters/__init__.py +19 -0
- synth_ai/products/graph_gepa/converters/openai_sft.py +29 -0
- synth_ai/sdk/__init__.py +123 -0
- synth_ai/sdk/api/__init__.py +1 -0
- synth_ai/sdk/api/models/supported.py +514 -0
- synth_ai/sdk/api/research_agent/__init__.py +296 -0
- synth_ai/sdk/api/train/__init__.py +85 -0
- synth_ai/sdk/api/train/builders.py +895 -0
- synth_ai/sdk/api/train/cli.py +2199 -0
- synth_ai/sdk/api/train/config_finder.py +267 -0
- synth_ai/sdk/api/train/configs/__init__.py +65 -0
- synth_ai/sdk/api/train/configs/prompt_learning.py +1706 -0
- synth_ai/sdk/api/train/configs/rl.py +187 -0
- synth_ai/sdk/api/train/configs/sft.py +99 -0
- synth_ai/sdk/api/train/configs/shared.py +81 -0
- synth_ai/sdk/api/train/context_learning.py +312 -0
- synth_ai/sdk/api/train/env_resolver.py +418 -0
- synth_ai/sdk/api/train/graph_validators.py +216 -0
- synth_ai/sdk/api/train/graphgen.py +984 -0
- synth_ai/sdk/api/train/graphgen_models.py +823 -0
- synth_ai/sdk/api/train/graphgen_validators.py +109 -0
- synth_ai/sdk/api/train/local_api.py +10 -0
- synth_ai/sdk/api/train/pollers.py +124 -0
- synth_ai/sdk/api/train/progress/__init__.py +97 -0
- synth_ai/sdk/api/train/progress/dataclasses.py +569 -0
- synth_ai/sdk/api/train/progress/events.py +326 -0
- synth_ai/sdk/api/train/progress/results.py +428 -0
- synth_ai/sdk/api/train/progress/tracker.py +641 -0
- synth_ai/sdk/api/train/prompt_learning.py +469 -0
- synth_ai/sdk/api/train/rl.py +441 -0
- synth_ai/sdk/api/train/sft.py +396 -0
- synth_ai/sdk/api/train/summary.py +522 -0
- synth_ai/sdk/api/train/supported_algos.py +147 -0
- synth_ai/sdk/api/train/task_app.py +351 -0
- synth_ai/sdk/api/train/utils.py +279 -0
- synth_ai/sdk/api/train/validators.py +2424 -0
- synth_ai/sdk/graphs/__init__.py +15 -0
- synth_ai/sdk/graphs/completions.py +570 -0
- synth_ai/{inference → sdk/inference}/__init__.py +0 -1
- synth_ai/sdk/inference/client.py +128 -0
- synth_ai/sdk/jobs/__init__.py +16 -0
- synth_ai/sdk/jobs/client.py +371 -0
- synth_ai/sdk/judging/__init__.py +14 -0
- synth_ai/sdk/judging/base.py +24 -0
- synth_ai/sdk/judging/client.py +40 -0
- synth_ai/sdk/judging/schemas.py +222 -0
- synth_ai/sdk/judging/types.py +42 -0
- synth_ai/sdk/learning/__init__.py +99 -0
- synth_ai/sdk/learning/algorithms.py +14 -0
- synth_ai/{learning → sdk/learning}/client.py +121 -30
- synth_ai/sdk/learning/config.py +5 -0
- synth_ai/{learning → sdk/learning}/constants.py +0 -2
- synth_ai/sdk/learning/context_learning_client.py +531 -0
- synth_ai/sdk/learning/context_learning_types.py +292 -0
- synth_ai/sdk/learning/ft_client.py +7 -0
- synth_ai/{learning → sdk/learning}/health.py +15 -9
- synth_ai/{learning → sdk/learning}/jobs.py +44 -47
- synth_ai/sdk/learning/prompt_extraction.py +334 -0
- synth_ai/sdk/learning/prompt_learning_client.py +455 -0
- synth_ai/sdk/learning/prompt_learning_types.py +186 -0
- synth_ai/{rl → sdk/learning/rl}/__init__.py +13 -8
- synth_ai/{learning/rl_client.py → sdk/learning/rl/client.py} +89 -77
- synth_ai/sdk/learning/rl/config.py +31 -0
- synth_ai/{rl → sdk/learning/rl}/contracts.py +5 -14
- synth_ai/{rl → sdk/learning/rl}/env_keys.py +45 -16
- synth_ai/sdk/learning/rl/secrets.py +13 -0
- synth_ai/sdk/learning/rl_client.py +5 -0
- synth_ai/sdk/learning/sft/__init__.py +29 -0
- synth_ai/sdk/learning/sft/client.py +95 -0
- synth_ai/sdk/learning/sft/config.py +270 -0
- synth_ai/sdk/learning/sft/data.py +698 -0
- synth_ai/sdk/learning/sse.py +57 -0
- synth_ai/sdk/learning/validators.py +52 -0
- synth_ai/sdk/localapi/__init__.py +40 -0
- synth_ai/sdk/localapi/apps/__init__.py +28 -0
- synth_ai/sdk/localapi/client.py +10 -0
- synth_ai/sdk/localapi/contracts.py +10 -0
- synth_ai/sdk/localapi/helpers.py +519 -0
- synth_ai/sdk/localapi/rollouts.py +87 -0
- synth_ai/sdk/localapi/server.py +29 -0
- synth_ai/sdk/localapi/template.py +70 -0
- synth_ai/sdk/streaming/__init__.py +35 -0
- synth_ai/sdk/streaming/config.py +94 -0
- synth_ai/sdk/streaming/handlers.py +1997 -0
- synth_ai/sdk/streaming/streamer.py +713 -0
- synth_ai/sdk/streaming/types.py +112 -0
- synth_ai/sdk/task/__init__.py +164 -0
- synth_ai/sdk/task/apps/__init__.py +169 -0
- synth_ai/sdk/task/auth.py +165 -0
- synth_ai/sdk/task/client.py +175 -0
- synth_ai/sdk/task/config.py +257 -0
- synth_ai/sdk/task/contracts.py +219 -0
- synth_ai/sdk/task/datasets.py +108 -0
- synth_ai/sdk/task/errors.py +50 -0
- synth_ai/sdk/task/health.py +34 -0
- synth_ai/sdk/task/in_process.py +1190 -0
- synth_ai/sdk/task/in_process_runner.py +314 -0
- synth_ai/sdk/task/inference_api.py +299 -0
- synth_ai/sdk/task/json.py +111 -0
- synth_ai/sdk/task/proxy.py +287 -0
- synth_ai/sdk/task/rubrics/__init__.py +55 -0
- synth_ai/sdk/task/rubrics/loaders.py +156 -0
- synth_ai/sdk/task/rubrics/models.py +57 -0
- synth_ai/sdk/task/rubrics/scoring.py +116 -0
- synth_ai/sdk/task/rubrics/strict.py +149 -0
- synth_ai/sdk/task/rubrics.py +219 -0
- synth_ai/sdk/task/server.py +631 -0
- synth_ai/sdk/task/trace_correlation_helpers.py +539 -0
- synth_ai/sdk/task/tracing_utils.py +95 -0
- synth_ai/sdk/task/validators.py +441 -0
- synth_ai/sdk/task/vendors.py +59 -0
- synth_ai/sdk/training/__init__.py +102 -0
- synth_ai/sdk/tunnels/__init__.py +83 -0
- synth_ai/sdk/tunnels/cleanup.py +83 -0
- synth_ai/sdk/tunnels/ports.py +120 -0
- synth_ai/utils/__init__.py +213 -0
- synth_ai-0.4.3.dist-info/METADATA +262 -0
- synth_ai-0.4.3.dist-info/RECORD +370 -0
- {synth_ai-0.2.6.dev1.dist-info → synth_ai-0.4.3.dist-info}/entry_points.txt +0 -1
- synth_ai/cli/calc.py +0 -69
- synth_ai/cli/demo.py +0 -131
- synth_ai/cli/legacy_root_backup.py +0 -470
- synth_ai/cli/man.py +0 -106
- synth_ai/cli/rl_demo.py +0 -137
- synth_ai/cli/status.py +0 -133
- synth_ai/config/base_url.py +0 -98
- synth_ai/core/experiment.py +0 -15
- synth_ai/core/system.py +0 -15
- synth_ai/demos/core/__init__.py +0 -1
- synth_ai/demos/core/cli.py +0 -685
- synth_ai/demos/demo_task_apps/__init__.py +0 -1
- synth_ai/demos/demo_task_apps/math/config.toml +0 -44
- synth_ai/demos/demo_task_apps/math/deploy_task_app.sh +0 -22
- synth_ai/environments/__init__.py +0 -31
- synth_ai/environments/environment/__init__.py +0 -1
- synth_ai/environments/environment/artifacts/__init__.py +0 -1
- synth_ai/environments/environment/artifacts/base.py +0 -52
- synth_ai/environments/environment/core.py +0 -67
- synth_ai/environments/environment/db/__init__.py +0 -1
- synth_ai/environments/environment/db/sqlite.py +0 -45
- synth_ai/environments/environment/registry.py +0 -233
- synth_ai/environments/environment/resources/sqlite.py +0 -45
- synth_ai/environments/environment/results.py +0 -1
- synth_ai/environments/environment/rewards/__init__.py +0 -1
- synth_ai/environments/environment/rewards/core.py +0 -29
- synth_ai/environments/environment/shared_engine.py +0 -26
- synth_ai/environments/environment/tools/__init__.py +0 -200
- synth_ai/environments/examples/__init__.py +0 -1
- synth_ai/environments/examples/bandit/__init__.py +0 -33
- synth_ai/environments/examples/bandit/engine.py +0 -294
- synth_ai/environments/examples/bandit/environment.py +0 -194
- synth_ai/environments/examples/bandit/taskset.py +0 -200
- synth_ai/environments/examples/crafter_classic/__init__.py +0 -8
- synth_ai/environments/examples/crafter_classic/agent_demos/analyze_semantic_words_markdown.py +0 -250
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_comprehensive_evaluation.py +0 -59
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_evaluation_browser.py +0 -152
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_evaluation_config.toml +0 -24
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_evaluation_framework.py +0 -1194
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/crafter_synth_config.toml +0 -56
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/filter_config_modal.toml +0 -32
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/filter_traces_sft_turso.py +0 -724
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/kick_off_ft_modal.py +0 -384
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/analyze_action_results.py +0 -53
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/analyze_agent_actions.py +0 -178
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/analyze_latest_run.py +0 -222
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/analyze_lm_traces.py +0 -183
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/analyze_no_rewards.py +0 -210
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/analyze_trace_issue.py +0 -206
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/check_db_schema.py +0 -49
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/check_latest_results.py +0 -64
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/debug_agent_responses.py +0 -88
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/quick_trace_check.py +0 -77
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/compare_experiments.py +0 -324
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/filter_traces_sft_turso.py +0 -580
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/kick_off_ft_oai.py +0 -362
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/multi_model_config.toml +0 -49
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/analyze_enhanced_hooks.py +0 -332
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/analyze_hook_events.py +0 -97
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/analyze_hook_results.py +0 -217
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/check_hook_storage.py +0 -87
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/check_seeds.py +0 -88
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/compare_seed_performance.py +0 -195
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/custom_eval_pipelines.py +0 -400
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/plot_hook_frequency.py +0 -195
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/seed_analysis_summary.py +0 -56
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/run_rollouts_for_models_and_compare_v3.py +0 -858
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_quick_evaluation.py +0 -52
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_react_agent.py +0 -874
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_trace_evaluation.py +0 -1412
- synth_ai/environments/examples/crafter_classic/agent_demos/example_v3_usage.py +0 -216
- synth_ai/environments/examples/crafter_classic/agent_demos/old/compare_traces.py +0 -296
- synth_ai/environments/examples/crafter_classic/agent_demos/old/crafter_comprehensive_evaluation.py +0 -58
- synth_ai/environments/examples/crafter_classic/agent_demos/old/crafter_env_serialization.py +0 -464
- synth_ai/environments/examples/crafter_classic/agent_demos/old/crafter_evaluation_browser.py +0 -152
- synth_ai/environments/examples/crafter_classic/agent_demos/old/crafter_quick_evaluation.py +0 -51
- synth_ai/environments/examples/crafter_classic/agent_demos/old/crafter_trace_evaluation.py +0 -1412
- synth_ai/environments/examples/crafter_classic/agent_demos/old/debug_player_loss.py +0 -112
- synth_ai/environments/examples/crafter_classic/agent_demos/old/diagnose_service.py +0 -203
- synth_ai/environments/examples/crafter_classic/agent_demos/old/diagnose_slowness.py +0 -305
- synth_ai/environments/examples/crafter_classic/agent_demos/old/eval_by_difficulty.py +0 -126
- synth_ai/environments/examples/crafter_classic/agent_demos/old/eval_example.py +0 -94
- synth_ai/environments/examples/crafter_classic/agent_demos/old/explore_saved_states.py +0 -142
- synth_ai/environments/examples/crafter_classic/agent_demos/old/filter_traces_sft.py +0 -26
- synth_ai/environments/examples/crafter_classic/agent_demos/old/filter_traces_sft_OLD.py +0 -984
- synth_ai/environments/examples/crafter_classic/agent_demos/old/generate_ft_data_gemini.py +0 -724
- synth_ai/environments/examples/crafter_classic/agent_demos/old/generate_ft_data_modal.py +0 -386
- synth_ai/environments/examples/crafter_classic/agent_demos/old/generate_ft_metadata.py +0 -205
- synth_ai/environments/examples/crafter_classic/agent_demos/old/kick_off_ft_gemini.py +0 -150
- synth_ai/environments/examples/crafter_classic/agent_demos/old/kick_off_ft_modal.py +0 -283
- synth_ai/environments/examples/crafter_classic/agent_demos/old/prepare_vertex_ft.py +0 -280
- synth_ai/environments/examples/crafter_classic/agent_demos/old/profile_env_slowness.py +0 -456
- synth_ai/environments/examples/crafter_classic/agent_demos/old/replicate_issue.py +0 -166
- synth_ai/environments/examples/crafter_classic/agent_demos/old/run_and_eval.py +0 -102
- synth_ai/environments/examples/crafter_classic/agent_demos/old/run_comparison.py +0 -128
- synth_ai/environments/examples/crafter_classic/agent_demos/old/run_qwen_rollouts.py +0 -655
- synth_ai/environments/examples/crafter_classic/agent_demos/old/trace_eval_OLD.py +0 -202
- synth_ai/environments/examples/crafter_classic/agent_demos/old/validate_openai_format.py +0 -166
- synth_ai/environments/examples/crafter_classic/config_logging.py +0 -111
- synth_ai/environments/examples/crafter_classic/debug_translation.py +0 -0
- synth_ai/environments/examples/crafter_classic/engine.py +0 -579
- synth_ai/environments/examples/crafter_classic/engine_deterministic_patch.py +0 -64
- synth_ai/environments/examples/crafter_classic/engine_helpers/action_map.py +0 -6
- synth_ai/environments/examples/crafter_classic/engine_helpers/serialization.py +0 -75
- synth_ai/environments/examples/crafter_classic/engine_serialization_patch_v3.py +0 -267
- synth_ai/environments/examples/crafter_classic/environment.py +0 -404
- synth_ai/environments/examples/crafter_classic/taskset.py +0 -233
- synth_ai/environments/examples/crafter_classic/trace_hooks_v3.py +0 -228
- synth_ai/environments/examples/crafter_classic/world_config_patch_simple.py +0 -299
- synth_ai/environments/examples/crafter_custom/__init__.py +0 -4
- synth_ai/environments/examples/crafter_custom/agent_demos/__init__.py +0 -1
- synth_ai/environments/examples/crafter_custom/agent_demos/trace_eval.py +0 -202
- synth_ai/environments/examples/crafter_custom/crafter/__init__.py +0 -7
- synth_ai/environments/examples/crafter_custom/crafter/config.py +0 -182
- synth_ai/environments/examples/crafter_custom/crafter/constants.py +0 -8
- synth_ai/environments/examples/crafter_custom/crafter/engine.py +0 -269
- synth_ai/environments/examples/crafter_custom/crafter/env.py +0 -262
- synth_ai/environments/examples/crafter_custom/crafter/objects.py +0 -417
- synth_ai/environments/examples/crafter_custom/crafter/recorder.py +0 -187
- synth_ai/environments/examples/crafter_custom/crafter/worldgen.py +0 -118
- synth_ai/environments/examples/crafter_custom/dataset_builder.py +0 -373
- synth_ai/environments/examples/crafter_custom/environment.py +0 -312
- synth_ai/environments/examples/crafter_custom/old/analyze_diamond_issue.py +0 -159
- synth_ai/environments/examples/crafter_custom/old/analyze_diamond_spawning.py +0 -158
- synth_ai/environments/examples/crafter_custom/old/compare_worlds.py +0 -71
- synth_ai/environments/examples/crafter_custom/old/dataset_stats.py +0 -105
- synth_ai/environments/examples/crafter_custom/old/diamond_spawning_summary.py +0 -119
- synth_ai/environments/examples/crafter_custom/old/example_dataset_usage.py +0 -52
- synth_ai/environments/examples/crafter_custom/run_dataset.py +0 -305
- synth_ai/environments/examples/enron/art_helpers/email_search_tools.py +0 -156
- synth_ai/environments/examples/enron/art_helpers/local_email_db.py +0 -281
- synth_ai/environments/examples/enron/art_helpers/types_enron.py +0 -25
- synth_ai/environments/examples/enron/engine.py +0 -295
- synth_ai/environments/examples/enron/environment.py +0 -166
- synth_ai/environments/examples/enron/taskset.py +0 -112
- synth_ai/environments/examples/enron/units/keyword_stats.py +0 -112
- synth_ai/environments/examples/minigrid/__init__.py +0 -48
- synth_ai/environments/examples/minigrid/agent_demos/minigrid_evaluation_framework.py +0 -1188
- synth_ai/environments/examples/minigrid/agent_demos/minigrid_quick_evaluation.py +0 -48
- synth_ai/environments/examples/minigrid/agent_demos/minigrid_react_agent.py +0 -562
- synth_ai/environments/examples/minigrid/agent_demos/minigrid_trace_evaluation.py +0 -221
- synth_ai/environments/examples/minigrid/engine.py +0 -589
- synth_ai/environments/examples/minigrid/environment.py +0 -274
- synth_ai/environments/examples/minigrid/environment_mapping.py +0 -242
- synth_ai/environments/examples/minigrid/puzzle_loader.py +0 -417
- synth_ai/environments/examples/minigrid/taskset.py +0 -583
- synth_ai/environments/examples/nethack/__init__.py +0 -7
- synth_ai/environments/examples/nethack/achievements.py +0 -337
- synth_ai/environments/examples/nethack/agent_demos/nethack_evaluation_framework.py +0 -981
- synth_ai/environments/examples/nethack/agent_demos/nethack_quick_evaluation.py +0 -74
- synth_ai/environments/examples/nethack/agent_demos/nethack_react_agent.py +0 -831
- synth_ai/environments/examples/nethack/engine.py +0 -739
- synth_ai/environments/examples/nethack/environment.py +0 -256
- synth_ai/environments/examples/nethack/helpers/__init__.py +0 -41
- synth_ai/environments/examples/nethack/helpers/action_mapping.py +0 -301
- synth_ai/environments/examples/nethack/helpers/nle_wrapper.py +0 -402
- synth_ai/environments/examples/nethack/helpers/observation_utils.py +0 -433
- synth_ai/environments/examples/nethack/helpers/recording_wrapper.py +0 -200
- synth_ai/environments/examples/nethack/helpers/trajectory_recorder.py +0 -269
- synth_ai/environments/examples/nethack/helpers/visualization/replay_viewer.py +0 -308
- synth_ai/environments/examples/nethack/helpers/visualization/visualizer.py +0 -431
- synth_ai/environments/examples/nethack/taskset.py +0 -323
- synth_ai/environments/examples/red/__init__.py +0 -7
- synth_ai/environments/examples/red/agent_demos/__init__.py +0 -1
- synth_ai/environments/examples/red/config_logging.py +0 -110
- synth_ai/environments/examples/red/engine.py +0 -694
- synth_ai/environments/examples/red/engine_helpers/__init__.py +0 -1
- synth_ai/environments/examples/red/engine_helpers/memory_map.py +0 -28
- synth_ai/environments/examples/red/engine_helpers/reward_components.py +0 -276
- synth_ai/environments/examples/red/engine_helpers/reward_library/__init__.py +0 -142
- synth_ai/environments/examples/red/engine_helpers/reward_library/adaptive_rewards.py +0 -57
- synth_ai/environments/examples/red/engine_helpers/reward_library/battle_rewards.py +0 -284
- synth_ai/environments/examples/red/engine_helpers/reward_library/composite_rewards.py +0 -150
- synth_ai/environments/examples/red/engine_helpers/reward_library/economy_rewards.py +0 -138
- synth_ai/environments/examples/red/engine_helpers/reward_library/efficiency_rewards.py +0 -57
- synth_ai/environments/examples/red/engine_helpers/reward_library/exploration_rewards.py +0 -331
- synth_ai/environments/examples/red/engine_helpers/reward_library/novelty_rewards.py +0 -121
- synth_ai/environments/examples/red/engine_helpers/reward_library/pallet_town_rewards.py +0 -559
- synth_ai/environments/examples/red/engine_helpers/reward_library/pokemon_rewards.py +0 -313
- synth_ai/environments/examples/red/engine_helpers/reward_library/social_rewards.py +0 -148
- synth_ai/environments/examples/red/engine_helpers/reward_library/story_rewards.py +0 -247
- synth_ai/environments/examples/red/engine_helpers/screen_analysis.py +0 -368
- synth_ai/environments/examples/red/engine_helpers/state_extraction.py +0 -140
- synth_ai/environments/examples/red/environment.py +0 -238
- synth_ai/environments/examples/red/taskset.py +0 -79
- synth_ai/environments/examples/red/units/__init__.py +0 -1
- synth_ai/environments/examples/sokoban/__init__.py +0 -1
- synth_ai/environments/examples/sokoban/agent_demos/sokoban_full_eval.py +0 -899
- synth_ai/environments/examples/sokoban/engine.py +0 -678
- synth_ai/environments/examples/sokoban/engine_helpers/__init__.py +0 -1
- synth_ai/environments/examples/sokoban/engine_helpers/room_utils.py +0 -657
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/__init__.py +0 -18
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/__init__.py +0 -3
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/boxoban_env.py +0 -131
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/render_utils.py +0 -370
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/room_utils.py +0 -332
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env.py +0 -306
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_fixed_targets.py +0 -67
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_pull.py +0 -115
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_two_player.py +0 -123
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_variations.py +0 -394
- synth_ai/environments/examples/sokoban/environment.py +0 -229
- synth_ai/environments/examples/sokoban/generate_verified_puzzles.py +0 -440
- synth_ai/environments/examples/sokoban/puzzle_loader.py +0 -312
- synth_ai/environments/examples/sokoban/taskset.py +0 -428
- synth_ai/environments/examples/sokoban/units/astar_common.py +0 -95
- synth_ai/environments/examples/tictactoe/__init__.py +0 -1
- synth_ai/environments/examples/tictactoe/engine.py +0 -368
- synth_ai/environments/examples/tictactoe/environment.py +0 -240
- synth_ai/environments/examples/tictactoe/taskset.py +0 -215
- synth_ai/environments/examples/verilog/__init__.py +0 -10
- synth_ai/environments/examples/verilog/engine.py +0 -329
- synth_ai/environments/examples/verilog/environment.py +0 -350
- synth_ai/environments/examples/verilog/taskset.py +0 -420
- synth_ai/environments/examples/wordle/__init__.py +0 -29
- synth_ai/environments/examples/wordle/engine.py +0 -398
- synth_ai/environments/examples/wordle/environment.py +0 -159
- synth_ai/environments/examples/wordle/helpers/generate_instances_wordfreq.py +0 -75
- synth_ai/environments/examples/wordle/taskset.py +0 -230
- synth_ai/environments/reproducibility/core.py +0 -42
- synth_ai/environments/reproducibility/helpers.py +0 -0
- synth_ai/environments/reproducibility/tree.py +0 -364
- synth_ai/environments/service/app.py +0 -91
- synth_ai/environments/service/core_routes.py +0 -1020
- synth_ai/environments/service/external_registry.py +0 -56
- synth_ai/environments/service/registry.py +0 -9
- synth_ai/environments/stateful/__init__.py +0 -1
- synth_ai/environments/stateful/core.py +0 -163
- synth_ai/environments/stateful/engine.py +0 -21
- synth_ai/environments/stateful/state.py +0 -7
- synth_ai/environments/tasks/api.py +0 -19
- synth_ai/environments/tasks/core.py +0 -80
- synth_ai/environments/tasks/filters.py +0 -41
- synth_ai/environments/tasks/utils.py +0 -91
- synth_ai/environments/v0_observability/history.py +0 -3
- synth_ai/environments/v0_observability/log.py +0 -2
- synth_ai/evals/base.py +0 -15
- synth_ai/experimental/synth_oss.py +0 -446
- synth_ai/http.py +0 -102
- synth_ai/inference/client.py +0 -20
- synth_ai/install_sqld.sh +0 -40
- synth_ai/jobs/client.py +0 -246
- synth_ai/learning/__init__.py +0 -24
- synth_ai/learning/config.py +0 -43
- synth_ai/learning/filtering.py +0 -0
- synth_ai/learning/ft_client.py +0 -59
- synth_ai/learning/offline/dpo.py +0 -0
- synth_ai/learning/offline/providers.py +0 -7
- synth_ai/learning/offline/sft.py +0 -0
- synth_ai/learning/offline/shared.py +0 -0
- synth_ai/learning/online/grpo.py +0 -0
- synth_ai/learning/online/irft.py +0 -0
- synth_ai/learning/prompts/banking77_injection_eval.py +0 -168
- synth_ai/learning/prompts/gepa.py +0 -0
- synth_ai/learning/prompts/hello_world_in_context_injection_ex.py +0 -213
- synth_ai/learning/prompts/mipro.py +0 -289
- synth_ai/learning/prompts/random_search.py +0 -246
- synth_ai/learning/prompts/run_mipro_banking77.py +0 -172
- synth_ai/learning/prompts/run_random_search_banking77.py +0 -324
- synth_ai/learning/sse.py +0 -58
- synth_ai/learning/validators.py +0 -48
- synth_ai/lm/__init__.py +0 -51
- synth_ai/lm/caching/constants.py +0 -6
- synth_ai/lm/caching/dbs.py +0 -0
- synth_ai/lm/caching/ephemeral.py +0 -102
- synth_ai/lm/caching/handler.py +0 -137
- synth_ai/lm/caching/initialize.py +0 -11
- synth_ai/lm/caching/persistent.py +0 -114
- synth_ai/lm/config.py +0 -110
- synth_ai/lm/constants.py +0 -32
- synth_ai/lm/core/__init__.py +0 -8
- synth_ai/lm/core/all.py +0 -73
- synth_ai/lm/core/exceptions.py +0 -7
- synth_ai/lm/core/main.py +0 -319
- synth_ai/lm/core/main_v3.py +0 -594
- synth_ai/lm/core/synth_models.py +0 -48
- synth_ai/lm/core/vendor_clients.py +0 -188
- synth_ai/lm/cost/__init__.py +0 -0
- synth_ai/lm/cost/monitor.py +0 -1
- synth_ai/lm/cost/statefulness.py +0 -1
- synth_ai/lm/injection.py +0 -80
- synth_ai/lm/overrides.py +0 -206
- synth_ai/lm/provider_support/__init__.py +0 -8
- synth_ai/lm/provider_support/anthropic.py +0 -972
- synth_ai/lm/provider_support/openai.py +0 -1139
- synth_ai/lm/provider_support/suppress_logging.py +0 -31
- synth_ai/lm/structured_outputs/__init__.py +0 -0
- synth_ai/lm/structured_outputs/handler.py +0 -440
- synth_ai/lm/structured_outputs/inject.py +0 -297
- synth_ai/lm/structured_outputs/rehabilitate.py +0 -185
- synth_ai/lm/tools/__init__.py +0 -3
- synth_ai/lm/tools/base.py +0 -172
- synth_ai/lm/unified_interface.py +0 -202
- synth_ai/lm/vendors/__init__.py +0 -0
- synth_ai/lm/vendors/base.py +0 -81
- synth_ai/lm/vendors/core/__init__.py +0 -0
- synth_ai/lm/vendors/core/anthropic_api.py +0 -387
- synth_ai/lm/vendors/core/gemini_api.py +0 -292
- synth_ai/lm/vendors/core/mistral_api.py +0 -322
- synth_ai/lm/vendors/core/openai_api.py +0 -220
- synth_ai/lm/vendors/core/synth_dev_api.py +0 -0
- synth_ai/lm/vendors/local/__init__.py +0 -0
- synth_ai/lm/vendors/local/ollama.py +0 -0
- synth_ai/lm/vendors/openai_standard.py +0 -780
- synth_ai/lm/vendors/openai_standard_responses.py +0 -256
- synth_ai/lm/vendors/retries.py +0 -22
- synth_ai/lm/vendors/supported/__init__.py +0 -0
- synth_ai/lm/vendors/supported/custom_endpoint.py +0 -417
- synth_ai/lm/vendors/supported/deepseek.py +0 -69
- synth_ai/lm/vendors/supported/grok.py +0 -75
- synth_ai/lm/vendors/supported/groq.py +0 -16
- synth_ai/lm/vendors/supported/ollama.py +0 -15
- synth_ai/lm/vendors/supported/openrouter.py +0 -74
- synth_ai/lm/vendors/supported/together.py +0 -11
- synth_ai/lm/vendors/synth_client.py +0 -808
- synth_ai/lm/warmup.py +0 -186
- synth_ai/rl/secrets.py +0 -19
- synth_ai/scripts/verify_rewards.py +0 -100
- synth_ai/task/__init__.py +0 -10
- synth_ai/task/contracts.py +0 -120
- synth_ai/task/health.py +0 -28
- synth_ai/task/validators.py +0 -12
- synth_ai/tracing/__init__.py +0 -30
- synth_ai/tracing_v1/__init__.py +0 -33
- synth_ai/tracing_v3/config.py +0 -84
- synth_ai/tracing_v3/storage/config.py +0 -62
- synth_ai/tracing_v3/turso/__init__.py +0 -25
- synth_ai/tracing_v3/turso/daemon.py +0 -144
- synth_ai/tracing_v3/turso/manager.py +0 -760
- synth_ai/v0/tracing/__init__.py +0 -0
- synth_ai/v0/tracing/abstractions.py +0 -224
- synth_ai/v0/tracing/base_client.py +0 -91
- synth_ai/v0/tracing/client_manager.py +0 -131
- synth_ai/v0/tracing/config.py +0 -140
- synth_ai/v0/tracing/context.py +0 -146
- synth_ai/v0/tracing/decorators.py +0 -680
- synth_ai/v0/tracing/events/__init__.py +0 -0
- synth_ai/v0/tracing/events/manage.py +0 -147
- synth_ai/v0/tracing/events/scope.py +0 -86
- synth_ai/v0/tracing/events/store.py +0 -228
- synth_ai/v0/tracing/immediate_client.py +0 -151
- synth_ai/v0/tracing/local.py +0 -18
- synth_ai/v0/tracing/log_client_base.py +0 -73
- synth_ai/v0/tracing/retry_queue.py +0 -186
- synth_ai/v0/tracing/trackers.py +0 -515
- synth_ai/v0/tracing/upload.py +0 -510
- synth_ai/v0/tracing/utils.py +0 -9
- synth_ai/v0/tracing_v1/__init__.py +0 -16
- synth_ai/v0/tracing_v1/abstractions.py +0 -224
- synth_ai/v0/tracing_v1/base_client.py +0 -91
- synth_ai/v0/tracing_v1/client_manager.py +0 -131
- synth_ai/v0/tracing_v1/config.py +0 -140
- synth_ai/v0/tracing_v1/context.py +0 -146
- synth_ai/v0/tracing_v1/decorators.py +0 -701
- synth_ai/v0/tracing_v1/events/__init__.py +0 -0
- synth_ai/v0/tracing_v1/events/manage.py +0 -147
- synth_ai/v0/tracing_v1/events/scope.py +0 -86
- synth_ai/v0/tracing_v1/events/store.py +0 -228
- synth_ai/v0/tracing_v1/immediate_client.py +0 -151
- synth_ai/v0/tracing_v1/local.py +0 -18
- synth_ai/v0/tracing_v1/log_client_base.py +0 -73
- synth_ai/v0/tracing_v1/retry_queue.py +0 -186
- synth_ai/v0/tracing_v1/trackers.py +0 -515
- synth_ai/v0/tracing_v1/upload.py +0 -525
- synth_ai/v0/tracing_v1/utils.py +0 -9
- synth_ai/zyk/__init__.py +0 -30
- synth_ai-0.2.6.dev1.dist-info/METADATA +0 -106
- synth_ai-0.2.6.dev1.dist-info/RECORD +0 -416
- /synth_ai/{demos → cli/demo_apps}/demo_task_apps/math/__init__.py +0 -0
- /synth_ai/{lm/caching → core/apps}/__init__.py +0 -0
- /synth_ai/{tracing_v3 → core/tracing_v3}/lm_call_record_abstractions.py +0 -0
- /synth_ai/{tracing_v3 → core/tracing_v3}/storage/__init__.py +0 -0
- /synth_ai/{tracing_v3 → core/tracing_v3}/storage/exceptions.py +0 -0
- /synth_ai/{tracing_v3 → core/tracing_v3}/storage/types.py +0 -0
- /synth_ai/{compound/cais.py → py.typed} +0 -0
- /synth_ai/{learning → sdk/learning}/core.py +0 -0
- /synth_ai/{learning → sdk/learning}/gateway.py +0 -0
- {synth_ai-0.2.6.dev1.dist-info → synth_ai-0.4.3.dist-info}/WHEEL +0 -0
- {synth_ai-0.2.6.dev1.dist-info → synth_ai-0.4.3.dist-info}/licenses/LICENSE +0 -0
- {synth_ai-0.2.6.dev1.dist-info → synth_ai-0.4.3.dist-info}/top_level.txt +0 -0
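
Many of the entries above are renames rather than new files: the old top-level packages move under `synth_ai.core` and `synth_ai.sdk` (for example `tracing_v3 → core/tracing_v3`, `learning → sdk/learning`, `rl → sdk/learning/rl`). A minimal sketch of what that implies for downstream imports, assuming the moved modules keep their module names at the new locations shown in the rename entries (public symbols inside those modules are not visible in this diff):

```python
# Hedged sketch of the 0.2.6.dev1 -> 0.4.3 import-path migration implied by the
# rename entries above. Module names come from the file list; no public symbols
# inside those modules are assumed.

# Before (0.2.6.dev1):
#   from synth_ai.tracing_v3 import session_tracer
#   from synth_ai.learning import client as learning_client
#   from synth_ai.rl import env_keys

# After (0.4.3):
from synth_ai.core.tracing_v3 import session_tracer
from synth_ai.sdk.learning import client as learning_client
from synth_ai.sdk.learning.rl import env_keys
```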
|
@@ -0,0 +1,2199 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import contextlib
|
|
5
|
+
import importlib
|
|
6
|
+
import json
|
|
7
|
+
import os
|
|
8
|
+
from collections.abc import Callable, Mapping
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
from typing import Any, NoReturn, cast
|
|
11
|
+
|
|
12
|
+
import click
|
|
13
|
+
|
|
14
|
+
from synth_ai.cli.lib.env import get_synth_and_env_keys, mask_str
|
|
15
|
+
from synth_ai.cli.lib.train_cfgs import find_train_cfgs_in_cwd, validate_train_cfg
|
|
16
|
+
from synth_ai.core.paths import print_paths_formatted
|
|
17
|
+
|
|
18
|
+
try:
|
|
19
|
+
_config_module = cast(
|
|
20
|
+
Any, importlib.import_module("synth_ai.core.env")
|
|
21
|
+
)
|
|
22
|
+
get_backend_from_env = cast(Callable[[], str], _config_module.get_backend_from_env)
|
|
23
|
+
except Exception as exc: # pragma: no cover - critical dependency
|
|
24
|
+
raise RuntimeError("Unable to load backend configuration helpers") from exc
|
|
25
|
+
|
|
26
|
+
from synth_ai.cli.lib.env import load_env_file
|
|
27
|
+
from synth_ai.cli.lib.errors import format_error_message, get_required_value
|
|
28
|
+
from synth_ai.core.telemetry import flush_logger, log_error, log_info
|
|
29
|
+
from synth_ai.sdk.streaming import (
|
|
30
|
+
GraphGenHandler,
|
|
31
|
+
CLIHandler,
|
|
32
|
+
JobStreamer,
|
|
33
|
+
LossCurveHandler,
|
|
34
|
+
PromptLearningHandler,
|
|
35
|
+
StreamConfig,
|
|
36
|
+
StreamEndpoints,
|
|
37
|
+
StreamType,
|
|
38
|
+
)
|
|
39
|
+
|
|
40
|
+
from .builders import build_prompt_learning_payload, build_rl_payload, build_sft_payload
|
|
41
|
+
from .local_api import check_local_api_health
|
|
42
|
+
from .graphgen import GraphGenJob
|
|
43
|
+
from .graphgen_models import load_graphgen_taskset
|
|
44
|
+
from .context_learning import ContextLearningJob
|
|
45
|
+
from .utils import (
|
|
46
|
+
TrainError,
|
|
47
|
+
ensure_api_base,
|
|
48
|
+
http_get,
|
|
49
|
+
http_post,
|
|
50
|
+
limit_jsonl_examples,
|
|
51
|
+
mask_value,
|
|
52
|
+
post_multipart,
|
|
53
|
+
preview_json,
|
|
54
|
+
sleep,
|
|
55
|
+
validate_sft_jsonl,
|
|
56
|
+
)
|
|
57
|
+
|
|
58
|
+
# Constants for prompt learning event types
|
|
59
|
+
_PROMPT_LEARNING_EVENT_BEST_PROMPT = "prompt.learning.best.prompt"
|
|
60
|
+
_PROMPT_LEARNING_EVENT_FINAL_RESULTS = "prompt.learning.final.results"
|
|
61
|
+
_PROMPT_LEARNING_EVENT_VALIDATION_SCORED = "prompt.learning.validation.scored"
|
|
62
|
+
_PROMPT_LEARNING_EVENT_GEPA_COMPLETE = "prompt.learning.gepa.complete"
|
|
63
|
+
_PROMPT_LEARNING_EVENT_MIPRO_COMPLETE = "prompt.learning.mipro.complete"
|
|
64
|
+
_PROMPT_LEARNING_EVENT_GEPA_NEW_BEST = "prompt.learning.gepa.new_best"
|
|
65
|
+
_PROMPT_LEARNING_EVENT_PHASE_CHANGED = "prompt.learning.phase.changed"
|
|
66
|
+
_PROMPT_LEARNING_EVENT_PROGRESS = "prompt.learning.progress"
|
|
67
|
+
_PROMPT_LEARNING_EVENT_STREAM_CONNECTED = "prompt.learning.stream.connected"
|
|
68
|
+
|
|
69
|
+
# Constants for formatting
|
|
70
|
+
_MAX_TEXT_REPLACEMENTS_DISPLAY = 3 # Max number of text replacements to show in output
|
|
71
|
+
_RESULTS_FILE_MAX_EVENTS = 10000 # Max events to fetch for results file generation
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def _format_text_replacements(obj: dict[str, Any] | None, max_display: int = _MAX_TEXT_REPLACEMENTS_DISPLAY) -> list[str]:
|
|
75
|
+
"""Extract and format text replacements from a candidate object.
|
|
76
|
+
|
|
77
|
+
Args:
|
|
78
|
+
obj: Candidate object dictionary containing text_replacements
|
|
79
|
+
max_display: Maximum number of replacements to display
|
|
80
|
+
|
|
81
|
+
Returns:
|
|
82
|
+
List of formatted lines showing role and replacement text
|
|
83
|
+
"""
|
|
84
|
+
lines = []
|
|
85
|
+
if not obj or not isinstance(obj, dict):
|
|
86
|
+
return lines
|
|
87
|
+
|
|
88
|
+
text_replacements = obj.get("text_replacements", [])
|
|
89
|
+
if not text_replacements or not isinstance(text_replacements, list):
|
|
90
|
+
return lines
|
|
91
|
+
|
|
92
|
+
for replacement in text_replacements[:max_display]:
|
|
93
|
+
if isinstance(replacement, dict):
|
|
94
|
+
new_text = replacement.get("new_text", "")
|
|
95
|
+
role = replacement.get("apply_to_role", "system")
|
|
96
|
+
if new_text:
|
|
97
|
+
lines.append(f" [{role.upper()}]: {new_text}")
|
|
98
|
+
lines.append("")
|
|
99
|
+
|
|
100
|
+
return lines
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
def _default_backend() -> str:
|
|
104
|
+
"""Resolve backend URL with proper production default.
|
|
105
|
+
|
|
106
|
+
Priority order:
|
|
107
|
+
1. BACKEND_BASE_URL env var (highest priority) - checked FIRST before any .env loading
|
|
108
|
+
2. BACKEND_OVERRIDE env var
|
|
109
|
+
3. get_backend_from_env() standard resolution (which may use SYNTH_BASE_URL from .env)
|
|
110
|
+
|
|
111
|
+
CRITICAL: This function MUST check BACKEND_BASE_URL directly from os.getenv()
|
|
112
|
+
to ensure it's not overridden by .env file loading.
|
|
113
|
+
"""
|
|
114
|
+
# Check explicit override first (BACKEND_BASE_URL takes absolute precedence)
|
|
115
|
+
# Read directly from os.environ to avoid any dotenv interference
|
|
116
|
+
explicit = os.environ.get("BACKEND_BASE_URL", "").strip()
|
|
117
|
+
if explicit:
|
|
118
|
+
# Return as-is, ensure_api_base() will normalize it
|
|
119
|
+
return explicit
|
|
120
|
+
|
|
121
|
+
# Fallback to BACKEND_OVERRIDE (also read directly from environ)
|
|
122
|
+
override = os.environ.get("BACKEND_OVERRIDE", "").strip()
|
|
123
|
+
if override:
|
|
124
|
+
return override
|
|
125
|
+
|
|
126
|
+
# Use standard resolution logic (may use SYNTH_BASE_URL from .env)
|
|
127
|
+
base, _ = get_backend_from_env()
|
|
128
|
+
return f"{base}/api" if not base.endswith("/api") else base
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
_DEFAULT_SFT_HIDDEN_EVENTS = {
|
|
132
|
+
"sft.created",
|
|
133
|
+
"sft.pricing.check.requested",
|
|
134
|
+
"sft.pricing.check.allowed",
|
|
135
|
+
"sft.stage",
|
|
136
|
+
"snapshot.fetch",
|
|
137
|
+
"hatchet.preflight",
|
|
138
|
+
"hatchet.submission.attempt",
|
|
139
|
+
"hatchet.submission.result",
|
|
140
|
+
"sft.running",
|
|
141
|
+
"sft.status",
|
|
142
|
+
"sft.worker.alive",
|
|
143
|
+
"sft.dispatch.selected",
|
|
144
|
+
"sft.config.prepared",
|
|
145
|
+
"sft.strategy.selected",
|
|
146
|
+
"sft.training.args",
|
|
147
|
+
}
|
|
148
|
+
|
|
149
|
+
_DEFAULT_RL_HIDDEN_SUBSTRINGS = {"modal", "hatchet"}
|
|
150
|
+
|
|
151
|
+
_DEFAULT_PROMPT_LEARNING_HIDDEN_EVENTS = {
|
|
152
|
+
"prompt.learning.policy.tokens",
|
|
153
|
+
"mipro.bootstrap.progress", # Hide individual bootstrap seed scores
|
|
154
|
+
"mipro.tpe.rankings", # Hide verbose TPE rankings
|
|
155
|
+
"mipro.tpe.selected", # Hide TPE selection details
|
|
156
|
+
"mipro.tpe.update", # Hide TPE density updates
|
|
157
|
+
"mipro.trial.duplicate", # Hide duplicate trial messages
|
|
158
|
+
"mipro.trial.started", # Hide individual trial start messages (too verbose with instructions)
|
|
159
|
+
"mipro.trial.minibatch", # Hide minibatch completion (only show full eval)
|
|
160
|
+
"mipro.trial.complete", # Hide individual trial completion
|
|
161
|
+
"mipro.iteration.skip_generation", # Hide skip generation messages
|
|
162
|
+
"mipro.budget.update", # Hide verbose budget updates (progress handler shows summary)
|
|
163
|
+
"mipro.instruction.proposed", # Hide proposed instructions (shown in results/logs only)
|
|
164
|
+
"gepa.transformation.proposed", # Hide proposed transformations (shown in results/logs only)
|
|
165
|
+
# Note: mipro.stage_proposer.called is shown so users know instruction generation is happening
|
|
166
|
+
}
|
|
167
|
+
|
|
168
|
+
|
|
169
|
+
def _load_toml_config(config_path: Path) -> dict[str, Any]:
|
|
170
|
+
"""Load TOML config file."""
|
|
171
|
+
try:
|
|
172
|
+
import tomli # type: ignore[import-untyped]
|
|
173
|
+
except ImportError:
|
|
174
|
+
# Fallback to tomllib for Python 3.11+
|
|
175
|
+
try:
|
|
176
|
+
import tomllib as tomli
|
|
177
|
+
except ImportError:
|
|
178
|
+
return {}
|
|
179
|
+
|
|
180
|
+
try:
|
|
181
|
+
with open(config_path, "rb") as f:
|
|
182
|
+
return tomli.load(f)
|
|
183
|
+
except Exception:
|
|
184
|
+
return {}
|
|
185
|
+
|
|
186
|
+
|
|
187
|
+
def parse_env_file_path_from_config(config_path: Path) -> str | None:
|
|
188
|
+
"""Parse env_file_path from TOML config.
|
|
189
|
+
|
|
190
|
+
Checks both [prompt_learning] and top-level sections.
|
|
191
|
+
"""
|
|
192
|
+
config = _load_toml_config(config_path)
|
|
193
|
+
|
|
194
|
+
# Check prompt_learning section first
|
|
195
|
+
pl_section = config.get("prompt_learning", {})
|
|
196
|
+
if isinstance(pl_section, dict):
|
|
197
|
+
env_file_path = pl_section.get("env_file_path")
|
|
198
|
+
if env_file_path:
|
|
199
|
+
return str(env_file_path)
|
|
200
|
+
|
|
201
|
+
# Check top-level
|
|
202
|
+
env_file_path = config.get("env_file_path")
|
|
203
|
+
if env_file_path:
|
|
204
|
+
return str(env_file_path)
|
|
205
|
+
|
|
206
|
+
return None
|
|
207
|
+
|
|
208
|
+
|
|
209
|
+
def parse_results_folder(config_path: Path) -> Path:
|
|
210
|
+
"""Parse results_folder from TOML config and validate it exists.
|
|
211
|
+
|
|
212
|
+
Checks both [prompt_learning] and top-level sections.
|
|
213
|
+
Raises ClickException if missing or invalid.
|
|
214
|
+
"""
|
|
215
|
+
config = _load_toml_config(config_path)
|
|
216
|
+
|
|
217
|
+
# Check prompt_learning section first
|
|
218
|
+
pl_section = config.get("prompt_learning", {})
|
|
219
|
+
if isinstance(pl_section, dict):
|
|
220
|
+
results_folder = pl_section.get("results_folder")
|
|
221
|
+
if results_folder:
|
|
222
|
+
results_folder_str = str(results_folder).strip()
|
|
223
|
+
# Resolve relative to config file's directory if path is relative
|
|
224
|
+
if not Path(results_folder_str).is_absolute():
|
|
225
|
+
config_dir = config_path.parent.resolve()
|
|
226
|
+
results_path = (config_dir / results_folder_str).resolve()
|
|
227
|
+
else:
|
|
228
|
+
results_path = Path(results_folder_str).expanduser().resolve()
|
|
229
|
+
|
|
230
|
+
# Validate that the folder exists or can be created
|
|
231
|
+
try:
|
|
232
|
+
results_path.mkdir(parents=True, exist_ok=True)
|
|
233
|
+
except (OSError, PermissionError) as e:
|
|
234
|
+
raise click.ClickException(
|
|
235
|
+
f"Could not create results folder: {results_path}\n"
|
|
236
|
+
f" Error: {e}\n"
|
|
237
|
+
f" Config: {config_path}\n"
|
|
238
|
+
f" TOML results_folder: {results_folder}"
|
|
239
|
+
) from e
|
|
240
|
+
|
|
241
|
+
return results_path
|
|
242
|
+
|
|
243
|
+
# Check top-level section
|
|
244
|
+
results_folder = config.get("results_folder")
|
|
245
|
+
if results_folder:
|
|
246
|
+
results_folder_str = str(results_folder).strip()
|
|
247
|
+
# Resolve relative to config file's directory if path is relative
|
|
248
|
+
if not Path(results_folder_str).is_absolute():
|
|
249
|
+
config_dir = config_path.parent.resolve()
|
|
250
|
+
results_path = (config_dir / results_folder_str).resolve()
|
|
251
|
+
else:
|
|
252
|
+
results_path = Path(results_folder_str).expanduser().resolve()
|
|
253
|
+
|
|
254
|
+
# Validate that the folder exists or can be created
|
|
255
|
+
try:
|
|
256
|
+
results_path.mkdir(parents=True, exist_ok=True)
|
|
257
|
+
except (OSError, PermissionError) as e:
|
|
258
|
+
raise click.ClickException(
|
|
259
|
+
f"Could not create results folder: {results_path}\n"
|
|
260
|
+
f" Error: {e}\n"
|
|
261
|
+
f" Config: {config_path}\n"
|
|
262
|
+
f" TOML results_folder: {results_folder}"
|
|
263
|
+
) from e
|
|
264
|
+
|
|
265
|
+
return results_path
|
|
266
|
+
|
|
267
|
+
# Missing - raise error
|
|
268
|
+
raise click.ClickException(
|
|
269
|
+
f"Missing required 'results_folder' field in TOML config: {config_path}\n"
|
|
270
|
+
f" Please add 'results_folder = \"path/to/results\"' to [prompt_learning] section or top-level.\n"
|
|
271
|
+
f" Paths can be relative (to config file directory) or absolute."
|
|
272
|
+
)
|
|
273
|
+
|
|
274
|
+
|
|
275
|
+
def parse_display_config(config_path: Path) -> dict[str, Any]:
|
|
276
|
+
"""Parse [display] section from TOML config."""
|
|
277
|
+
config = _load_toml_config(config_path)
|
|
278
|
+
display_section = config.get("display", {})
|
|
279
|
+
|
|
280
|
+
# Also extract termination_config for max limits
|
|
281
|
+
termination_section = config.get("termination_config", {})
|
|
282
|
+
# Also check prompt_learning.termination_config
|
|
283
|
+
pl_section = config.get("prompt_learning", {})
|
|
284
|
+
if isinstance(pl_section, dict):
|
|
285
|
+
pl_termination = pl_section.get("termination_config", {})
|
|
286
|
+
if isinstance(pl_termination, dict):
|
|
287
|
+
# Merge with top-level termination_config (top-level takes precedence)
|
|
288
|
+
termination_section = {**pl_termination, **termination_section}
|
|
289
|
+
|
|
290
|
+
return {
|
|
291
|
+
"local_backend": display_section.get("local_backend", False),
|
|
292
|
+
"tui": display_section.get("tui", False),
|
|
293
|
+
"show_curve": display_section.get("show_curve", True),
|
|
294
|
+
"verbose_summary": display_section.get("verbose_summary", True),
|
|
295
|
+
"show_trial_results": display_section.get("show_trial_results", True),
|
|
296
|
+
"show_transformations": display_section.get("show_transformations", False),
|
|
297
|
+
"show_validation": display_section.get("show_validation", True),
|
|
298
|
+
"max_tokens": termination_section.get("max_tokens"),
|
|
299
|
+
"max_time_seconds": termination_section.get("max_time_seconds"),
|
|
300
|
+
"max_rollouts": termination_section.get("max_rollouts"),
|
|
301
|
+
}
|
|
302
|
+
|
|
303
|
+
|
|
304
|
+
def _build_stream_components(
    stream_format: str,
    *,
    hidden_event_types: set[str] | None = None,
    hidden_event_substrings: set[str] | None = None,
) -> tuple[StreamConfig, list]:
    """Return stream configuration and handlers for the requested format."""
    if stream_format == "chart":
        config = StreamConfig(
            enabled_streams={StreamType.STATUS, StreamType.EVENTS, StreamType.METRICS},
            event_types={
                "sft.progress",
                "sft.training.started",
                "sft.training.finish",
                "sft.validation.summary",
                "rl.train.step",
                "rl.train.started",
                "rl.train.completed",
                "workflow.completed",
                "workflow.failed",
            },
            metric_names={"train.loss"},
        )
        handlers = [LossCurveHandler()]
    else:
        config = StreamConfig.default()
        handlers = [
            CLIHandler(
                hidden_event_types=hidden_event_types or set(),
                hidden_event_substrings=hidden_event_substrings or set(),
            )
        ]
    return config, handlers

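# Illustrative usage (not part of the diff): handle_sft() below calls this helper as
#   config, handlers = _build_stream_components("cli", hidden_event_types=_DEFAULT_SFT_HIDDEN_EVENTS)
# Passing "chart" instead selects the event/metric subset above plus a LossCurveHandler,
# so callers only switch the stream_format string, not the handler wiring.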
def _validate_openai_key_if_provider_is_openai(cfg_path: Path) -> None:
    """Validate that OPENAI_API_KEY is set if the provider is OpenAI.

    For prompt learning jobs, checks if policy.provider is 'openai' and raises
    a ClickException if OPENAI_API_KEY is not set in the environment.
    """
    cfg = _load_toml_config(cfg_path)

    # Check prompt_learning section
    pl_section = cfg.get("prompt_learning", {})
    if not isinstance(pl_section, dict):
        return

    policy = pl_section.get("policy", {})
    if not isinstance(policy, dict):
        return

    provider = policy.get("provider", "").lower()

    if provider == "openai":
        openai_key = os.environ.get("OPENAI_API_KEY", "").strip()
        if not openai_key:
            raise click.ClickException(
                "OPENAI_API_KEY is required when using provider='openai'.\n"
                "Please set OPENAI_API_KEY in your .env file or environment."
            )


# Module-level logging to track import and registration
import logging as _logging  # noqa: E402
import sys  # noqa: E402

_logger = _logging.getLogger(__name__)
_logger.debug("[TRAIN_MODULE] Module synth_ai.sdk.api.train.cli imported")

@click.command("train")
@click.argument(
    "cfg_path",
    required=False,
    type=click.Path(exists=True, path_type=Path)
)
@click.option(
    "--env",
    "env_file",
    type=click.Path(exists=True, path_type=Path),
    help=".env file(s) to preload (skips selection prompt)",
)
@click.option(
    "--task-url",
    default=None,
    help="Override task app base URL (RL only)"
)
@click.option(
    "--dataset",
    "dataset_path",
    type=click.Path(),
    default=None,
    help="Override dataset JSONL path (SFT)",
)
@click.option("--model", default=None, help="Override model identifier")
@click.option(
    "--allow-experimental",
    "allow_experimental",
    is_flag=True,
    flag_value=True,
    default=None,
    help="Allow experimental models (overrides SDK_EXPERIMENTAL env)",
)
@click.option(
    "--no-allow-experimental",
    "allow_experimental",
    is_flag=True,
    flag_value=False,
    help="Disallow experimental models (overrides SDK_EXPERIMENTAL env)",
)
@click.option("--idempotency", default=None, help="Idempotency-Key header for job creation")
@click.option("--dry-run", is_flag=True, hidden=True, help="Deprecated: no-op")
@click.option("--poll/--no-poll", default=True, help="Poll job status until terminal state")
@click.option(
    "--poll-timeout", default=3600.0, type=float, help="Maximum seconds to poll before timing out"
)
@click.option("--poll-interval", default=5.0, type=float, help="Seconds between poll attempts")
@click.option(
    "--stream-format",
    type=click.Choice(["cli", "chart"]),
    default="cli",
    show_default=True,
    help="Streaming output style (cli = line updates, chart = live loss panel)",
)
@click.option(
    "--examples",
    "examples_limit",
    type=int,
    default=None,
    help="Limit SFT training to the first N examples",
)
@click.option(
    "--backend",
    "backend_override",
    default=None,
    help="Backend base URL (e.g., http://localhost:8000). Overrides BACKEND_BASE_URL env var.",
)
@click.option(
    "--local-backend",
    is_flag=True,
    default=None,
    help="Use local backend (localhost:8000). Overrides TOML [display].local_backend",
)
@click.option(
    "--tui",
    is_flag=True,
    default=None,
    help="Enable live TUI dashboard. Overrides TOML [display].tui",
)
@click.option(
    "--show-curve",
    is_flag=True,
    default=None,
    help="Show optimization curve at end. Overrides TOML [display].show_curve",
)
@click.option(
    "--verbose-summary",
    is_flag=True,
    default=None,
    help="Show detailed final summary. Overrides TOML [display].verbose_summary",
)
@click.option(
    "--type",
    "train_type_override",
    type=click.Choice(["prompt", "rl", "sft", "graphgen", "adas", "context_learning"]),
    default=None,
    help=(
        "Explicitly set training type. Required for GraphGen (uses JSON datasets). "
        "'adas' is a legacy alias."
    ),
)
@click.option(
    "--rollout-budget",
    "rollout_budget",
    type=int,
    default=None,
    help="Rollout budget for GraphGen optimization (default: 100)",
)
@click.option(
    "--proposer-effort",
    "proposer_effort",
    type=click.Choice(["low", "medium", "high"]),
    default=None,
    help="Proposer effort level for GraphGen (default: medium)",
)
def train_command(
    cfg_path: Path | None,
    env_file: Path | None,
    task_url: str | None,
    dataset_path: str | None,
    model: str | None,
    allow_experimental: bool | None,
    idempotency: str | None,
    dry_run: bool,
    poll: bool,
    poll_timeout: float,
    poll_interval: float,
    stream_format: str,
    examples_limit: int | None,
    backend_override: str | None,
    local_backend: bool | None,
    tui: bool | None,
    show_curve: bool | None,
    verbose_summary: bool | None,
    train_type_override: str | None,
    rollout_budget: int | None,
    proposer_effort: str | None,
) -> None:

    """Interactive launcher for RL / SFT / Prompt Learning / GraphGen / Context Learning jobs."""
    import traceback

    ctx: dict[str, Any] = {
        "cfg_path": str(cfg_path) if cfg_path else None,
        "poll": poll,
        "poll_timeout": poll_timeout,
        "poll_interval": poll_interval,
        "stream_format": stream_format,
        "backend_override": backend_override,
    }
    log_info("train_command invoked", ctx=ctx)

    # Wrap entire function in try-except to catch ALL exceptions
    try:
        # Log entry point IMMEDIATELY - this should always appear
        sys.stderr.write("[TRAIN_CMD] Starting train command\n")
        sys.stderr.flush()
        click.echo(f"[TRAIN_CMD] Args: cfg_path={cfg_path}, poll={poll}", err=True)
        click.echo(f"[TRAIN_CMD] Python executable: {sys.executable}", err=True)
        click.echo(f"[TRAIN_CMD] Working directory: {os.getcwd()}", err=True)

        try:
            load_env_file()
            click.echo("[TRAIN_CMD] Environment file loaded", err=True)
        except Exception as e:
            click.echo(f"[TRAIN_CMD] ERROR loading env file: {e}", err=True)
            traceback.print_exc(file=sys.stderr)
            raise

        # CRITICAL: Load explicit .env file BEFORE config validation to ensure BACKEND_BASE_URL is available
        if env_file and Path(env_file).exists():
            from dotenv import load_dotenv
            # Load with override=True to ensure BACKEND_BASE_URL from .env takes precedence
            load_dotenv(Path(env_file), override=True)
            click.echo(f"[TRAIN_CMD] Loaded explicit .env: {env_file}", err=True)

        # Handle GraphGen specially - it uses JSON datasets, not TOML configs
        if train_type_override in ("graphgen", "adas"):
            # For GraphGen, dataset_path is required and cfg_path is ignored
            if not dataset_path:
                raise click.ClickException(
                    "GraphGen requires --dataset flag with path to JSON dataset file.\n"
                    "Usage: synth-ai train --type graphgen --dataset my_tasks.json"
                )
            train_type = train_type_override
            click.echo(f"[TRAIN_CMD] GraphGen mode: using dataset {dataset_path}", err=True)
        else:
            # Non-GraphGen: use TOML config
            if not cfg_path:
                available_cfgs = find_train_cfgs_in_cwd()
                if len(available_cfgs) == 1:
                    train_type, cfg_path_str, _ = available_cfgs[0]
                    cfg_path = Path(cfg_path_str)
                    print(f"Automatically selected {train_type} training config at", cfg_path)
                else:
                    if len(available_cfgs) == 0:
                        print("No training config found in cwd.")
                        print("Validate your training config: synth-ai train-cfg check [CFG_PATH]")
                    else:
                        print("Multiple training configs found. Please specify which one to use:")
                        print_paths_formatted(available_cfgs)
                        print("Usage: synth-ai train --config [CFG_PATH]")
                    return None

            train_type = train_type_override or validate_train_cfg(cfg_path)

        synth_api_key, _ = get_synth_and_env_keys(env_file)

        # Resolve backend URL with priority: --backend flag > BACKEND_BASE_URL env > default
        if backend_override:
            # CLI flag takes highest precedence
            backend_base = ensure_api_base(backend_override.strip())
            click.echo(f"Backend base: {backend_base} (from --backend flag)")
        else:
            # Check BACKEND_BASE_URL AFTER loading env file
            backend_base_url_env = os.environ.get("BACKEND_BASE_URL", "").strip()
            backend_override_env = os.environ.get("BACKEND_OVERRIDE", "").strip()

            # Debug: Show what env vars are set
            click.echo(f"🔍 DEBUG: BACKEND_BASE_URL={backend_base_url_env or '(not set)'}", err=True)
            click.echo(f"🔍 DEBUG: BACKEND_OVERRIDE={backend_override_env or '(not set)'}", err=True)

            # Use _default_backend() to respect BACKEND_BASE_URL env var
            backend_raw = _default_backend()
            click.echo(f"🔍 DEBUG: _default_backend() returned: {backend_raw}", err=True)
            backend_base = ensure_api_base(backend_raw)

            # Assertion: Validate backend URL is what we expect
            if backend_base_url_env:
                expected_backend = ensure_api_base(backend_base_url_env)
                if backend_base != expected_backend:
                    raise click.ClickException(
                        f"Backend URL mismatch! Expected: {expected_backend}, Got: {backend_base}. "
                        f"BACKEND_BASE_URL={backend_base_url_env} but resolved to {backend_base}. "
                        f"This indicates BACKEND_BASE_URL is not being respected.\n"
                        f"💡 Solutions:\n"
                        f"  1. Add BACKEND_BASE_URL=http://localhost:8000 to your .env file\n"
                        f"  2. Use --backend http://localhost:8000 flag (requires package rebuild)\n"
                        f"  3. Set BACKEND_OVERRIDE=http://localhost:8000 in your shell\n"
                        f"  4. Set SYNTH_BACKEND_URL_OVERRIDE=local and LOCAL_BACKEND_URL=http://localhost:8000"
                    )

            click.echo(f"Backend base: {backend_base} (key {mask_str(synth_api_key)})")
            if backend_base_url_env:
                click.echo(f"  (from BACKEND_BASE_URL={backend_base_url_env})")

        # Skip TOML-based validation for GraphGen (uses JSON datasets)
        if train_type not in ("adas", "graphgen") and cfg_path:
            _validate_openai_key_if_provider_is_openai(cfg_path)

        match train_type:
            case "prompt":
                if not cfg_path:
                    raise click.ClickException("Prompt Learning requires a TOML config file.")
                handle_prompt_learning(
                    cfg_path=cfg_path,
                    backend_base=backend_base,
                    synth_key=synth_api_key,
                    task_url_override=task_url,
                    allow_experimental=allow_experimental,
                    dry_run=dry_run,
                    poll=poll,
                    poll_timeout=poll_timeout,
                    poll_interval=poll_interval,
                    stream_format=stream_format,
                )
            case "context_learning":
                if not cfg_path:
                    raise click.ClickException(
                        "Context Learning requires a TOML config file.\n"
                        "Usage: synth-ai train --type context_learning --config my_context.toml"
                    )
                handle_context_learning(
                    cfg_path=cfg_path,
                    backend_base=backend_base,
                    synth_key=synth_api_key,
                    poll=poll,
                    stream_format=stream_format,
                )
            case "rl":
                if not cfg_path:
                    raise click.ClickException("RL requires a TOML config file.")
                handle_rl(
                    cfg_path=cfg_path,
                    backend_base=backend_base,
                    synth_key=synth_api_key,
                    task_url_override=task_url,
                    model_override=model,
                    idempotency=idempotency,
                    allow_experimental=allow_experimental,
                    dry_run=dry_run,
                    poll=poll,
                    poll_timeout=poll_timeout,
                    poll_interval=poll_interval,
                    stream_format=stream_format,
                )
            case "sft":
                if not cfg_path:
                    raise click.ClickException("SFT requires a TOML config file.")
                dataset_override_path = Path(dataset_path).expanduser().resolve() if dataset_path else None
                handle_sft(
                    cfg_path=cfg_path,
                    backend_base=backend_base,
                    synth_key=synth_api_key,
                    dataset_override=dataset_override_path,
                    allow_experimental=allow_experimental,
                    dry_run=dry_run,
                    poll=poll,
                    poll_timeout=poll_timeout,
                    poll_interval=poll_interval,
                    stream_format=stream_format,
                    examples_limit=examples_limit,
                )
            case "adas" | "graphgen":
                if not dataset_path:
                    raise click.ClickException("GraphGen requires a dataset path.")
                graphgen_dataset_path = Path(dataset_path).expanduser().resolve()
                handle_graphgen(
                    dataset_path=graphgen_dataset_path,
                    backend_base=backend_base,
                    synth_key=synth_api_key,
                    policy_model=model,
                    rollout_budget=rollout_budget,
                    proposer_effort=proposer_effort,
                    poll=poll,
                    poll_timeout=poll_timeout,
                    poll_interval=poll_interval,
                    stream_format=stream_format,
                )
    except Exception as e:
        ctx["error"] = type(e).__name__
        log_error("train_command failed", ctx=ctx)
        click.echo(f"[TRAIN_CMD] FATAL ERROR: {e}", err=True)
        traceback.print_exc(file=sys.stderr)
        raise
    finally:
        flush_logger()

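# Illustrative invocations (not part of the diff), assembled from the usage strings and
# option names above; the file names are hypothetical:
#   synth-ai train --config my_rl.toml --backend http://localhost:8000
#   synth-ai train --type graphgen --dataset my_tasks.json --rollout-budget 50
# The second form skips TOML handling entirely and goes through handle_graphgen() below.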
def handle_context_learning(
    *,
    cfg_path: Path,
    backend_base: str,
    synth_key: str,
    poll: bool,
    stream_format: str,
) -> None:
    """Submit and stream a Context Learning job.

    Context Learning is SSE-first; polling flags are ignored.
    """
    if not poll:
        click.echo("Note: --no-poll is ignored for context learning (SSE streaming only).")

    click.echo("\n=== Submitting Context Learning Job ===")
    try:
        job = ContextLearningJob.from_config(
            cfg_path,
            backend_url=backend_base,
            api_key=synth_key,
        )
        result = job.submit()
    except Exception as e:
        raise click.ClickException(str(e))

    click.echo("\n✓ Job created:")
    click.echo(f"  Context Learning Job ID: {result.job_id}")
    click.echo(f"  Status: {result.status}")

    click.echo("\n=== Streaming Job Progress ===")
    if stream_format == "chart":
        click.echo("Chart stream format is not supported for context learning; using CLI output.")

    try:
        final_status = job.stream_until_complete()
    except Exception as e:
        raise click.ClickException(str(e))

    status = final_status.get("status") if isinstance(final_status, dict) else "unknown"
    click.echo(f"\nFinal status: {status}")
    click.echo(preview_json(final_status, limit=600))

    if status in {"succeeded", "completed"}:
        click.echo("\n=== Best Preflight Script ===")
        try:
            best = job.download_best_script()
            if best.preflight_script:
                click.echo(best.preflight_script[:2000])
                if len(best.preflight_script) > 2000:
                    click.echo(
                        f"\n... (truncated, {len(best.preflight_script)} chars total)"
                    )
        except Exception as e:
            click.echo(f"⚠️ Could not download best script: {e}")

def _wait_for_training_file(
    backend_base: str, api_key: str, file_id: str, *, timeout: float = 10.0
) -> None:
    """Wait for training file to be visible after upload.

    Reduced from 120s to 10s because:
    - POST response already confirms file is uploaded
    - Backend now forces read-your-writes consistency
    - By job creation time, replica lag has resolved
    - Quick sanity check only, not critical path
    """
    url = f"{backend_base.rstrip('/')}/files/{file_id}"
    headers = {"Authorization": f"Bearer {api_key}"}
    elapsed = 0.0
    interval = 2.0
    first_check = True
    while True:
        resp = http_get(url, headers=headers, timeout=30.0)
        if resp.status_code == 200:
            try:
                data = resp.json()
            except json.JSONDecodeError:
                data = {}
            status = str(
                data.get("status") or data.get("state") or data.get("storage_state") or "ready"
            ).lower()
            if first_check:
                click.echo(f"File uploaded successfully (id={file_id}, status={status})")
                first_check = False
            if status in {"ready", "uploaded", "stored", "complete"}:
                click.echo(f"✓ Training file ready (status={status})")
                return
            # Show progress for processing states
            if status in {"processing", "pending", "validating"}:
                click.echo(
                    f"  Waiting for file processing... (status={status}, {elapsed:.0f}s elapsed)"
                )
        elif resp.status_code == 404:
            # Keep polling; object may not be visible yet
            if first_check:
                click.echo(f"Waiting for file {file_id} to become visible...")
                first_check = False
        elif resp.status_code in {401, 403}:
            # Auth errors won't resolve by polling - fail immediately
            try:
                error_body = resp.json()
            except json.JSONDecodeError:
                error_body = resp.text[:400]
            click.echo("\n[ERROR] Authentication failed when checking training file:")
            click.echo(f"  URL: {url}")
            click.echo(f"  Status: {resp.status_code}")
            click.echo(f"  Response: {error_body}")
            click.echo(f"  API key: {mask_value(api_key)}")
            raise click.ClickException(
                f"Authentication error ({resp.status_code}). "
                "Check that your SYNTH_API_KEY is valid and has permission to access this organization's files."
            )
        else:
            # Other errors - show details but keep polling
            try:
                error_body = resp.json()
            except json.JSONDecodeError:
                error_body = resp.text[:400]
            click.echo(f"[WARN] Unexpected response checking file {file_id}:")
            click.echo(f"  URL: {url}")
            click.echo(f"  Status: {resp.status_code}")
            click.echo(f"  Response: {error_body}")

        if elapsed >= timeout:
            raise click.ClickException(
                f"Training file {file_id} not ready after {timeout:.0f}s (last status: {resp.status_code})"
            )
        sleep(interval)
        elapsed += interval

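# Illustrative arithmetic (not part of the diff): with the defaults above (timeout=10.0,
# interval=2.0) the loop issues at most 6 requests -- elapsed takes the values
# 0, 2, 4, 6, 8, 10 and the ClickException fires once elapsed >= timeout.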
def handle_rl(
    *,
    cfg_path: Path,
    backend_base: str,
    synth_key: str,
    task_url_override: str | None,
    model_override: str | None,
    idempotency: str | None,
    allow_experimental: bool | None,
    dry_run: bool,
    poll: bool,
    poll_timeout: float,
    poll_interval: float,
    stream_format: str,
) -> None:
    ctx: dict[str, Any] = {
        "cfg_path": str(cfg_path),
        "backend_base": backend_base,
        "task_url_override": task_url_override,
        "poll": poll,
    }
    log_info("handle_rl invoked", ctx=ctx)
    overrides: dict[str, Any] = {
        "backend": backend_base,
        "task_url": task_url_override,
        "model": model_override,
    }
    build = build_rl_payload(
        config_path=cfg_path,
        task_url=task_url_override or os.environ.get("TASK_APP_URL", ""),
        overrides=overrides,
        idempotency=idempotency,
        allow_experimental=allow_experimental,
    )

    # Backend-side verification: try ALL org environment keys against /health and /task_info
    verify_url = f"{backend_base}/rl/verify_task_app"
    verify_headers = {"Authorization": f"Bearer {synth_key}", "Content-Type": "application/json"}
    try:
        vresp = http_post(
            verify_url, headers=verify_headers, json_body={"endpoint_base_url": build.task_url}
        )
        try:
            parsed_json = vresp.json()
        except json.JSONDecodeError:
            parsed_json = None

        if isinstance(parsed_json, Mapping):
            vjs: dict[str, Any] = dict(parsed_json)
        else:
            vjs = {
                "status": vresp.status_code,
                "text": (vresp.text or "")[:400],
            }
            if parsed_json is not None:
                vjs["body"] = parsed_json
    except Exception as _ve:
        raise click.ClickException(
            f"Task app verification call failed: {type(_ve).__name__}: {_ve}"
        ) from _ve
    if vresp.status_code is not None and vresp.status_code >= 400:
        click.echo("Task app verification error:\n" + preview_json(vjs, limit=800))
        raise click.ClickException(f"Verification failed with status {vresp.status_code}")
    if not bool(vjs.get("any_ok")):
        click.echo("Task app verification failed; no auth combination succeeded. Full report:")
        click.echo(preview_json(vjs, limit=1200))
        raise click.ClickException("Task app verification failed (auth)")
    else:
        # Print concise summary
        try:
            cands = vjs.get("candidates_first15") or []
            attempts_raw = vjs.get("attempts")
            attempts: list[Mapping[str, Any]] = (
                [a for a in attempts_raw if isinstance(a, Mapping)]
                if isinstance(attempts_raw, list)
                else []
            )
            statuses = [attempt.get("status") for attempt in attempts]
            click.echo(f"Verification OK (candidates={cands}, statuses={statuses})")
        except (KeyError, ValueError, AttributeError):
            # Parsing verification summary failed, but verification itself succeeded
            click.echo("Verification OK")

    env_key = get_required_value(
        "environment_api_key",
        env_value=os.environ.get("ENVIRONMENT_API_KEY"),
    )
    os.environ["ENVIRONMENT_API_KEY"] = env_key

    click.echo("Performing task app health check…")
    health = check_local_api_health(build.task_url, env_key)
    if not health.ok:
        click.echo(f"Task app health check failed: {health.detail}")
        raise click.ClickException("Aborting due to failing health check")
    else:
        click.echo("Task app healthy")

    create_url = f"{backend_base}/rl/jobs"
    headers = {"Authorization": f"Bearer {synth_key}", "Content-Type": "application/json"}
    if build.idempotency:
        headers["Idempotency-Key"] = build.idempotency

    click.echo(f"POST {create_url}")
    click.echo("Payload preview:\n" + preview_json(build.payload, limit=800))

    resp = http_post(create_url, headers=headers, json_body=build.payload)
    try:
        js = resp.json()
    except json.JSONDecodeError as e:
        click.echo(f"⚠️ Failed to parse JSON response: {e}")
        js = {"status": resp.status_code, "text": resp.text[:400]}
    click.echo(f"Response {resp.status_code}: {preview_json(js, limit=400)}")
    if resp.status_code not in (200, 201):
        raise click.ClickException("Job creation failed")
    job_id = js.get("job_id") or js.get("id")
    if not job_id:
        raise click.ClickException("Response missing job id")

    if not poll:
        click.echo(f"Created job {job_id} (polling disabled)")
        return

    click.echo("\n=== Streaming Job Progress ===")

    # Enable metrics for prompt learning
    if stream_format == "chart":
        config = StreamConfig(
            enabled_streams={StreamType.STATUS, StreamType.EVENTS, StreamType.METRICS},
            event_types={
                "prompt.learning.progress",
                "prompt.learning.gepa.start",
                "prompt.learning.gepa.complete",
            },
            metric_names={"gepa.transformation.mean_score"},
        )
        handlers = [LossCurveHandler()]
        click.echo("Using live chart (metric=gepa.transformation.mean_score)")
    else:
        config = StreamConfig(
            enabled_streams={StreamType.STATUS, StreamType.EVENTS, StreamType.METRICS},
            metric_names={"gepa.transformation.mean_score"},
        )
        handlers = [CLIHandler(hidden_event_substrings=_DEFAULT_RL_HIDDEN_SUBSTRINGS)]

    streamer = JobStreamer(
        base_url=backend_base,
        api_key=synth_key,
        job_id=job_id,
        endpoints=StreamEndpoints.rl(job_id),
        config=config,
        handlers=handlers,
        interval_seconds=poll_interval,
        timeout_seconds=poll_timeout,
    )
    final_status = asyncio.run(streamer.stream_until_terminal())
    click.echo(f"Final status: {final_status.get('status', 'unknown')}")
    click.echo(preview_json(final_status, limit=600))

def handle_sft(
    *,
    cfg_path: Path,
    backend_base: str,
    synth_key: str,
    dataset_override: Path | None,
    allow_experimental: bool | None,
    dry_run: bool,
    poll: bool,
    poll_timeout: float,
    poll_interval: float,
    stream_format: str,
    examples_limit: int | None,
) -> None:
    ctx: dict[str, Any] = {
        "cfg_path": str(cfg_path),
        "backend_base": backend_base,
        "dataset_override": str(dataset_override) if dataset_override else None,
        "poll": poll,
    }
    log_info("handle_sft invoked", ctx=ctx)
    try:
        build = build_sft_payload(
            config_path=cfg_path,
            dataset_override=dataset_override,
            allow_experimental=allow_experimental,
        )
    except TrainError as exc:
        _raise_sft_usage_error(exc)

    limited_path: Path | None = None

    try:
        if examples_limit is not None:
            limited_path = limit_jsonl_examples(build.train_file, examples_limit)
            click.echo(
                f"Using first {examples_limit} examples from {build.train_file} -> {limited_path}"
            )
            build.train_file = limited_path

        click.echo("Validating training dataset…")
        validate_sft_jsonl(build.train_file)
        if build.validation_file and build.validation_file.suffix == ".jsonl":
            click.echo("Validating validation dataset…")
            validate_sft_jsonl(build.validation_file)

        upload_url = f"{backend_base.rstrip('/')}/files"
        click.echo("\n=== Uploading Training Data ===")
        click.echo(f"Dataset: {build.train_file}")
        click.echo(f"Destination: {upload_url}")
        resp = post_multipart(
            upload_url, api_key=synth_key, file_field="file", file_path=build.train_file
        )
        js = (
            resp.json()
            if resp.headers.get("content-type", "").startswith("application/json")
            else {}
        )
        if resp.status_code is not None and resp.status_code >= 400 or "id" not in js:
            click.echo("\n[ERROR] Training file upload failed:")
            click.echo(f"  URL: {upload_url}")
            click.echo(f"  Status: {resp.status_code}")
            click.echo(f"  Response: {js or resp.text[:400]}")
            click.echo(f"  File: {build.train_file}")
            raise click.ClickException(
                f"Training file upload failed with status {resp.status_code}"
            )
        train_file_id = js["id"]
        click.echo(f"✓ Training file uploaded (id={train_file_id})")
        val_file_id = None
        if build.validation_file:
            click.echo(f"Uploading validation dataset: {build.validation_file}")
            vresp = post_multipart(
                upload_url,
                api_key=synth_key,
                file_field="file",
                file_path=build.validation_file,
            )
            vjs = (
                vresp.json()
                if vresp.headers.get("content-type", "").startswith("application/json")
                else {}
            )
            if vresp.status_code is not None and vresp.status_code < 400 and "id" in vjs:
                val_file_id = vjs["id"]
                click.echo(f"✓ Validation file uploaded (id={val_file_id})")
            else:
                click.echo(
                    f"[WARN] Validation upload failed ({vresp.status_code}): {vjs or vresp.text[:200]}"
                )
        payload = dict(build.payload)
        payload["training_file_id"] = train_file_id
        if val_file_id:
            payload.setdefault("metadata", {}).setdefault("effective_config", {}).setdefault(
                "data", {}
            )["validation_files"] = [val_file_id]

        click.echo("\n=== Checking File Processing Status ===")
        try:
            _wait_for_training_file(backend_base, synth_key, train_file_id)
        except click.ClickException as exc:
            click.echo(f"[WARN] File readiness check failed: {exc}")
            click.echo("Proceeding anyway - backend will validate file during job creation...")

        click.echo("\n=== Creating Training Job ===")
        click.echo("Job payload preview:")
        click.echo(preview_json(payload, limit=800))

        create_url = f"{backend_base}/learning/jobs"
        headers = {"Authorization": f"Bearer {synth_key}", "Content-Type": "application/json"}
        click.echo(f"\nPOST {create_url}")
        resp = http_post(create_url, headers=headers, json_body=payload)
        js = (
            resp.json()
            if resp.headers.get("content-type", "").startswith("application/json")
            else {}
        )
        if resp.status_code not in (200, 201):
            click.echo("\n[ERROR] Job creation failed:")
            click.echo(f"  URL: {create_url}")
            click.echo(f"  Status: {resp.status_code}")
            click.echo(f"  Response: {preview_json(js, limit=600)}")
            raise click.ClickException(f"Job creation failed with status {resp.status_code}")
        job_id = js.get("job_id") or js.get("id")
        if not job_id:
            raise click.ClickException("Response missing job id")
        click.echo(f"✓ Job created (id={job_id})")

        click.echo("\n=== Starting Training Job ===")
        start_url = f"{backend_base}/learning/jobs/{job_id}/start"
        click.echo(f"POST {start_url}")
        start_resp = http_post(start_url, headers=headers, json_body={})
        if start_resp.status_code not in (200, 201):
            click.echo(f"[WARN] Job start returned status {start_resp.status_code}")
        else:
            click.echo("✓ Job started")

        if not poll:
            click.echo(f"Started job {job_id} (polling disabled)")
            return

        click.echo("\n=== Streaming Job Progress ===")
        config, handlers = _build_stream_components(
            stream_format, hidden_event_types=_DEFAULT_SFT_HIDDEN_EVENTS
        )
        if stream_format == "chart":
            click.echo("Using live loss chart (metric=train.loss)")
        streamer = JobStreamer(
            base_url=backend_base,
            api_key=synth_key,
            job_id=job_id,
            endpoints=StreamEndpoints.learning(job_id),
            config=config,
            handlers=handlers,
            interval_seconds=poll_interval,
            timeout_seconds=poll_timeout,
        )
        final_status = asyncio.run(streamer.stream_until_terminal())
        status = final_status.get('status') if isinstance(final_status, dict) else 'unknown'
        click.echo(f"Final status: {status}")
        click.echo(preview_json(final_status, limit=600))
    finally:
        if limited_path is not None:
            with contextlib.suppress(OSError):
                limited_path.unlink(missing_ok=True)
            # Clean up empty parent directory if possible
            with contextlib.suppress(OSError):
                limited_path.parent.rmdir()

def handle_graphgen(
    *,
    dataset_path: Path,
    backend_base: str,
    synth_key: str,
    policy_model: str | None,
    rollout_budget: int | None,
    proposer_effort: str | None,
    poll: bool,
    poll_timeout: float,
    poll_interval: float,
    stream_format: str,
) -> None:
    """Handle GraphGen workflow optimization job creation and streaming.

    GraphGen uses JSON dataset files and auto-generates task apps.
    """
    ctx: dict[str, Any] = {
        "dataset_path": str(dataset_path),
        "backend_base": backend_base,
        "poll": poll,
    }
    log_info("handle_graphgen invoked", ctx=ctx)

    # Load dataset
    click.echo(f"Loading GraphGen dataset from: {dataset_path}")
    try:
        dataset = load_graphgen_taskset(dataset_path)
    except FileNotFoundError:
        raise click.ClickException(f"Dataset file not found: {dataset_path}")
    except ValueError as e:
        raise click.ClickException(f"Invalid GraphGen dataset format: {e}")

    problem_spec = None
    try:
        raw_dataset = json.loads(dataset_path.read_text())
        problem_spec = raw_dataset.get("problem_spec") or raw_dataset.get("initial_prompt")
    except Exception:
        problem_spec = None

    click.echo(f"Dataset loaded: {dataset.metadata.name}")
    click.echo(f"  Tasks: {len(dataset.tasks)}")
    click.echo(f"  Gold outputs: {len(dataset.gold_outputs)}")
    click.echo(f"  Judge mode: {dataset.judge_config.mode}")

    # Create GraphGen job
    job = GraphGenJob.from_dataset(
        dataset=dataset,
        policy_model=policy_model or "gpt-4o-mini",
        rollout_budget=rollout_budget or 100,
        proposer_effort=proposer_effort or "medium",  # type: ignore
        problem_spec=problem_spec,
        backend_url=backend_base,
        api_key=synth_key,
        auto_start=True,
    )

    click.echo("\n=== Submitting GraphGen Job ===")
    click.echo(f"Policy model: {job.config.policy_model}")
    click.echo(f"Rollout budget: {job.config.rollout_budget}")
    click.echo(f"Proposer effort: {job.config.proposer_effort}")

    try:
        result = job.submit()
    except RuntimeError as e:
        raise click.ClickException(str(e))

    click.echo(f"\n✓ Job created:")
    click.echo(f"  GraphGen Job ID: {result.graphgen_job_id}")
    click.echo(f"  Status: {result.status}")

    if not poll:
        click.echo(f"\nCreated job {result.graphgen_job_id} (polling disabled)")
        return

    click.echo("\n=== Streaming Job Progress ===")

    # Build stream handlers
    if stream_format == "chart":
        config = StreamConfig(
            enabled_streams={StreamType.STATUS, StreamType.EVENTS, StreamType.METRICS},
            metric_names={"gepa.transformation.mean_score"},
        )
        handlers = [LossCurveHandler()]
        click.echo("Using live loss chart (metric=gepa.transformation.mean_score)")
    else:
        config = StreamConfig(
            enabled_streams={StreamType.STATUS, StreamType.EVENTS, StreamType.METRICS},
            max_events_per_poll=500,
            deduplicate=True,
        )
        handlers = [GraphGenHandler()]

    # Stream until complete
    try:
        final_status = job.stream_until_complete(
            timeout=poll_timeout,
            interval=poll_interval,
            handlers=handlers,
        )
    except TimeoutError as e:
        raise click.ClickException(str(e))

    status = final_status.get('status') if isinstance(final_status, dict) else 'unknown'
    click.echo(f"\nFinal status: {status}")
    click.echo(preview_json(final_status, limit=600))

    # Download and display best prompt if succeeded
    if status == "succeeded" or status == "completed":
        click.echo("\n=== Best Optimized Prompt ===")
        try:
            prompt = job.download_prompt()
            if prompt:
                click.echo(prompt[:2000])
                if len(prompt) > 2000:
                    click.echo(f"\n... (truncated, {len(prompt)} chars total)")
        except Exception as e:
            click.echo(f"⚠️ Could not download prompt: {e}")

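# Illustrative note (not part of the diff): handle_graphgen() above reads only
# "problem_spec" (or the legacy "initial_prompt") straight from the raw JSON; everything
# else must parse via load_graphgen_taskset(), which exposes dataset.metadata.name,
# dataset.tasks, dataset.gold_outputs and dataset.judge_config.mode. The exact on-disk
# schema is defined by that loader and is not shown in this diff.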
def _raise_sft_usage_error(exc: TrainError) -> NoReturn:
    message = str(exc).strip()
    lower_msg = message.lower()
    context = "Preparing SFT training job payload"
    impact = "Cannot submit training job without a valid dataset path"

    if "dataset not specified" in lower_msg:
        raise click.UsageError(
            format_error_message(
                summary="Dataset path required",
                context=context,
                problem="No dataset path was provided via config or CLI",
                impact=impact,
                solutions=[
                    ("Add [job].data = \"/path/to/data.jsonl\" to the config", "Persist the dataset path in the TOML file"),
                    ("Re-run with --dataset /path/to/data.jsonl", "Override the dataset path from the CLI"),
                    ("Use an absolute path accessible from the current working directory", "Relative paths are resolved from the shell cwd"),
                ],
            )
        ) from exc

    if "dataset not found" in lower_msg:
        raise click.UsageError(
            format_error_message(
                summary="Dataset path not found",
                context=context,
                problem=message,
                impact=impact,
                solutions=[
                    ("Verify the dataset path exists on disk", "Double-check spelling and that the file hasn't moved"),
                    ("Provide an absolute path to the dataset file", "Avoid relying on relative paths that resolve incorrectly"),
                    ("Sync the dataset to this machine before running the CLI", "Remote paths must be accessible locally"),
                ],
            )
        ) from exc

    raise click.ClickException(message) from exc

def _save_verbose_log_file(
    events: list[dict[str, Any]],
    log_file: Path,
    algorithm_name: str,
    job_id: str,
    append_summary: bool = False,
) -> None:
    """Save a verbose log file with all events in chronological order, including summary.

    If append_summary is True, only append the summary section (events were already streamed live).
    """
    import json
    from datetime import datetime

    try:
        lines = []
        if not append_summary:
            # Full log file with header and all events
            lines.append("=" * 80)
            lines.append(f"{algorithm_name} PROMPT LEARNING VERBOSE LOG")
            lines.append("=" * 80)
            lines.append(f"Job ID: {job_id}")
            lines.append(f"Timestamp: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
            lines.append(f"Total Events: {len(events)}")
            lines.append("=" * 80)
            lines.append("")

        # Sort events by timestamp if available
        def get_timestamp(event: dict[str, Any]) -> str:
            return event.get("timestamp", event.get("created_at", ""))

        sorted_events = sorted(events, key=get_timestamp)

        # Only include events if not appending summary (events were already streamed live)
        if not append_summary:
            for idx, event in enumerate(sorted_events, 1):
                if not isinstance(event, dict):
                    continue

                event_type = event.get("type", "unknown")
                timestamp = event.get("timestamp") or event.get("created_at", "")
                level = event.get("level", "info")
                message = event.get("message", "")
                data = event.get("data", {})

                lines.append(f"[{idx}] {timestamp} [{level.upper()}] {event_type}")
                if message:
                    lines.append(f"    Message: {message}")
                if data:
                    # Format data nicely (truncate very long values)
                    formatted_data = {}
                    for key, value in data.items():
                        if isinstance(value, dict | list):
                            # Convert to JSON string, truncate if too long
                            json_str = json.dumps(value, indent=2)
                            if len(json_str) > 1000:
                                json_str = json_str[:1000] + "... (truncated)"
                            formatted_data[key] = json_str
                        elif isinstance(value, str) and len(value) > 500:
                            formatted_data[key] = value[:500] + "... (truncated)"
                        else:
                            formatted_data[key] = value

                    if formatted_data:
                        lines.append(f"    Data: {json.dumps(formatted_data, indent=2)}")
                lines.append("")

        # Add summary table and chart at the end (always included)
        if append_summary:
            lines.append("\n\n")
            lines.append("=" * 80)
            lines.append("FINAL SUMMARY")
            lines.append("=" * 80)

        try:
            from .summary import _generate_summary_text
            # Extract optimization curve from events
            optimization_curve = None
            trial_scores = []
            for event in sorted_events:
                if isinstance(event, dict):
                    event_type = event.get("type", "")
                    if event_type in ("prompt.learning.trial.complete", "mipro.new_incumbent"):
                        data = event.get("data", {})
                        trial_num = data.get("trial") or data.get("trial_num")
                        score = data.get("score") or data.get("minibatch_score")
                        if trial_num is not None and score is not None:
                            trial_scores.append((trial_num, score))

            if trial_scores:
                best_so_far = {}
                for trial_num, score in sorted(trial_scores):
                    if trial_num not in best_so_far or score > best_so_far[trial_num]:
                        best_so_far[trial_num] = score
                optimization_curve = sorted(best_so_far.items())

            summary_text, curve_text = _generate_summary_text(
                events=sorted_events,
                algorithm=algorithm_name.lower() if algorithm_name else None,
                optimization_curve=optimization_curve,
            )
            if summary_text:
                lines.append(summary_text)
            if curve_text:
                lines.append("")
                lines.append(curve_text)
        except Exception as e:
            lines.append(f"⚠️ Could not generate summary: {e}")

        lines.append("=" * 80)
        lines.append("END OF LOG")
        lines.append("=" * 80)

        # Write to file (append if summary-only mode)
        mode = "a" if append_summary else "w"
        with open(log_file, mode, encoding="utf-8") as f:
            if append_summary:
                f.write("\n")
            f.write("\n".join(lines))

    except Exception as e:
        click.echo(f"⚠️ Could not save verbose log file: {e}")

def _save_prompt_learning_results_locally(
|
|
1459
|
+
*,
|
|
1460
|
+
backend_base: str,
|
|
1461
|
+
api_key: str,
|
|
1462
|
+
job_id: str,
|
|
1463
|
+
config_path: Path,
|
|
1464
|
+
results_folder: Path,
|
|
1465
|
+
) -> None:
|
|
1466
|
+
"""Fetch events and generate results file locally after prompt learning completes."""
|
|
1467
|
+
from datetime import datetime
|
|
1468
|
+
|
|
1469
|
+
try:
|
|
1470
|
+
# Fetch all events
|
|
1471
|
+
url = f"{backend_base}/prompt-learning/online/jobs/{job_id}/events?limit={_RESULTS_FILE_MAX_EVENTS}"
|
|
1472
|
+
headers = {"Authorization": f"Bearer {api_key}"}
|
|
1473
|
+
resp = http_get(url, headers=headers, timeout=30.0)
|
|
1474
|
+
|
|
1475
|
+
if resp.status_code != 200:
|
|
1476
|
+
click.echo(f"⚠️ Could not fetch events to generate results file (status={resp.status_code})")
|
|
1477
|
+
return
|
|
1478
|
+
|
|
1479
|
+
data = resp.json()
|
|
1480
|
+
# Handle both list response (backend) and dict response (legacy compatibility)
|
|
1481
|
+
if isinstance(data, list):
|
|
1482
|
+
events = data
|
|
1483
|
+
elif isinstance(data, dict):
|
|
1484
|
+
events = data.get("events", [])
|
|
1485
|
+
if not isinstance(events, list):
|
|
1486
|
+
click.echo(f"⚠️ Events field is not a list: {type(events).__name__}")
|
|
1487
|
+
return
|
|
1488
|
+
else:
|
|
1489
|
+
click.echo(f"⚠️ Unexpected response type: {type(data).__name__}")
|
|
1490
|
+
return
|
|
1491
|
+
|
|
1492
|
+
if not events:
|
|
1493
|
+
return
|
|
1494
|
+
|
|
1495
|
+
# Extract key data from events
|
|
1496
|
+
best_score = None
|
|
1497
|
+
best_prompt = None
|
|
1498
|
+
baseline_score = None
|
|
1499
|
+
attempted_candidates = []
|
|
1500
|
+
optimized_candidates = []
|
|
1501
|
+
mipro_topk_candidates = [] # Collect MIPRO top-K candidates
|
|
1502
|
+
proposed_instructions = [] # Collect proposed instructions from MIPRO
|
|
1503
|
+
proposed_transformations = [] # Collect proposed transformations from GEPA
|
|
1504
|
+
|
|
1505
|
+
for event in events:
|
|
1506
|
+
if not isinstance(event, dict):
|
|
1507
|
+
continue # Skip malformed events
|
|
1508
|
+
|
|
1509
|
+
event_type = event.get("type", "")
|
|
1510
|
+
event_data = event.get("data", {})
|
|
1511
|
+
if not isinstance(event_data, dict):
|
|
1512
|
+
event_data = {} # Fallback to empty dict for safety
|
|
1513
|
+
|
|
1514
|
+
if event_type == _PROMPT_LEARNING_EVENT_BEST_PROMPT:
|
|
1515
|
+
best_score = event_data.get("best_score")
|
|
1516
|
+
best_prompt = event_data.get("best_prompt")
|
|
1517
|
+
elif event_type == _PROMPT_LEARNING_EVENT_FINAL_RESULTS:
|
|
1518
|
+
attempted_candidates = event_data.get("attempted_candidates", [])
|
|
1519
|
+
optimized_candidates = event_data.get("optimized_candidates", [])
|
|
1520
|
+
elif event_type == _PROMPT_LEARNING_EVENT_VALIDATION_SCORED:
|
|
1521
|
+
# Check if this is the baseline by checking for is_baseline flag or baseline in message
|
|
1522
|
+
is_baseline = event_data.get("is_baseline", False)
|
|
1523
|
+
if not is_baseline:
|
|
1524
|
+
msg = event.get("message", "")
|
|
1525
|
+
is_baseline = "baseline" in msg.lower()
|
|
1526
|
+
if is_baseline:
|
|
1527
|
+
baseline_score = event_data.get("accuracy")
|
|
1528
|
+
elif event_type == _PROMPT_LEARNING_EVENT_GEPA_COMPLETE and best_score is None:
|
|
1529
|
+
best_score = event_data.get("best_score")
|
|
1530
|
+
elif event_type == _PROMPT_LEARNING_EVENT_MIPRO_COMPLETE:
|
|
1531
|
+
# MIPRO completion event includes best_prompt and best_score
|
|
1532
|
+
if best_score is None:
|
|
1533
|
+
best_score = event_data.get("best_score")
|
|
1534
|
+
if best_prompt is None:
|
|
1535
|
+
best_prompt = event_data.get("best_prompt")
|
|
1536
|
+
elif event_type == "mipro.topk.evaluated":
|
|
1537
|
+
# Extract MIPRO top-K candidate data with full details
|
|
1538
|
+
rank = event_data.get("rank")
|
|
1539
|
+
train_score = event_data.get("train_score")
|
|
1540
|
+
test_score = event_data.get("test_score")
|
|
1541
|
+
if rank is not None and train_score is not None and test_score is not None:
|
|
1542
|
+
# Extract full instruction text (may be multi-line)
|
|
1543
|
+
instruction_text = event_data.get("instruction_text", "")
|
|
1544
|
+
if not instruction_text:
|
|
1545
|
+
# Try to get from instruction_lines if available
|
|
1546
|
+
instruction_lines = event_data.get("instruction_lines", [])
|
|
1547
|
+
if instruction_lines:
|
|
1548
|
+
instruction_text = "\n".join(str(line) for line in instruction_lines)
|
|
1549
|
+
|
|
1550
|
+
mipro_topk_candidates.append({
|
|
1551
|
+
"rank": rank,
|
|
1552
|
+
"train_score": train_score,
|
|
1553
|
+
"test_score": test_score,
|
|
1554
|
+
"lift_absolute": event_data.get("lift_absolute"),
|
|
1555
|
+
"lift_percent": event_data.get("lift_percent"),
|
|
1556
|
+
"instruction_text": instruction_text,
|
|
1557
|
+
"instruction_lines": event_data.get("instruction_lines", []),
|
|
1558
|
+
"demo_indices": event_data.get("demo_indices", []),
|
|
1559
|
+
"stage_payloads": event_data.get("stage_payloads", {}),
|
|
1560
|
+
"instruction_indices": event_data.get("instruction_indices", []),
|
|
1561
|
+
"test_per_seed": event_data.get("test_per_seed", {}),
|
|
1562
|
+
})
|
|
1563
|
+
elif event_type == "mipro.baseline.test":
|
|
1564
|
+
# Extract baseline test score
|
|
1565
|
+
if baseline_score is None:
|
|
1566
|
+
baseline_score = event_data.get("test_score")
|
|
1567
|
+
elif event_type == "mipro.instruction.proposed":
|
|
1568
|
+
# Collect proposed instructions
|
|
1569
|
+
proposed_instructions.append({
|
|
1570
|
+
"iteration": event_data.get("iteration"),
|
|
1571
|
+
"stage_id": event_data.get("stage_id"),
|
|
1572
|
+
"module_id": event_data.get("module_id"),
|
|
1573
|
+
"instruction_id": event_data.get("instruction_id"),
|
|
1574
|
+
"instruction_text": event_data.get("instruction_text", ""),
|
|
1575
|
+
"instruction_lines": event_data.get("instruction_lines", []),
|
|
1576
|
+
"demo_indices": event_data.get("demo_indices", []),
|
|
1577
|
+
"proposal_id": event_data.get("proposal_id"),
|
|
1578
|
+
"timestamp": event.get("created_at"),
|
|
1579
|
+
})
|
|
1580
|
+
elif event_type == "gepa.transformation.proposed":
|
|
1581
|
+
# Collect proposed transformations
|
|
1582
|
+
proposed_transformations.append({
|
|
1583
|
+
"generation": event_data.get("generation"),
|
|
1584
|
+
"mutation_type": event_data.get("mutation_type"),
|
|
1585
|
+
"operator": event_data.get("operator"),
|
|
1586
|
+
"transformation_id": event_data.get("transformation_id"),
|
|
1587
|
+
"parent_id": event_data.get("parent_id"),
|
|
1588
|
+
"transformation_text": event_data.get("transformation_text", ""),
|
|
1589
|
+
"transformation_dict": event_data.get("transformation_dict", {}),
|
|
1590
|
+
"mutation_params": event_data.get("mutation_params", {}),
|
|
1591
|
+
"timestamp": event.get("created_at"),
|
|
1592
|
+
})
|
|
1593
|
+
|
|
1594
|
+
# Check if we have any results to display (best_prompt, best_score, or candidates)
|
|
1595
|
+
has_results = bool(attempted_candidates or optimized_candidates or best_prompt or best_score is not None)
|
|
1596
|
+
if not has_results:
|
|
1597
|
+
return
|
|
1598
|
+
|
|
1599
|
+
# Determine algorithm name from events
|
|
1600
|
+
algorithm_name = "PROMPT LEARNING"
|
|
1601
|
+
for event in events:
|
|
1602
|
+
if isinstance(event, dict):
|
|
1603
|
+
event_type = event.get("type", "")
|
|
1604
|
+
if "gepa" in event_type.lower():
|
|
1605
|
+
algorithm_name = "GEPA"
|
|
1606
|
+
break
|
|
1607
|
+
elif "mipro" in event_type.lower():
|
|
1608
|
+
algorithm_name = "MIPRO"
|
|
1609
|
+
break
|
|
1610
|
+
|
|
1611
|
+
        # Generate formatted report
        lines = []
        lines.append("=" * 80)
        lines.append(f"{algorithm_name} PROMPT LEARNING RESULTS")
        lines.append("=" * 80)
        lines.append(f"Job ID: {job_id}")
        lines.append(f"Timestamp: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        lines.append("")
        if baseline_score is not None:
            lines.append(f"📊 Baseline Score: {baseline_score:.4f} ({baseline_score*100:.1f}%)")
        if best_score is not None:
            lines.append(f"🏆 Best Score: {best_score:.4f} ({best_score*100:.1f}%)")
        if baseline_score is not None and best_score is not None:
            improvement = ((best_score - baseline_score) / baseline_score) * 100 if baseline_score > 0 else 0
            lines.append(f"📈 Improvement: {improvement:+.1f}% relative ({(best_score - baseline_score)*100:+.1f} pp absolute)")
        lines.append("=" * 80)
        lines.append("")

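        # Illustration (hypothetical scores, not from a real job): with baseline_score=0.60
        # and best_score=0.75, the improvement line above renders as
        #   "📈 Improvement: +25.0% relative (+15.0 pp absolute)".
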
        # Add best prompt if available
        if best_prompt and isinstance(best_prompt, dict):
            lines.append("🏆 BEST PROMPT")
            lines.append("-" * 80)
            sections = best_prompt.get("sections", [])
            if not isinstance(sections, list):
                sections = []
            for sec in sections:
                if not isinstance(sec, dict):
                    continue
                role = sec.get("role", "unknown")
                content = sec.get("content", "")
                lines.append(f"\n[{role.upper()}]:")
                lines.append(content)
            lines.append("")

        # Add optimized candidates
        if optimized_candidates and isinstance(optimized_candidates, list):
            lines.append("=" * 80)
            lines.append(f"✨ TOP OPTIMIZED CANDIDATES ({len(optimized_candidates)})")
            lines.append("=" * 80)
            lines.append("")

            for idx, cand in enumerate(optimized_candidates):
                if not isinstance(cand, dict):
                    continue
                candidate_score = cand.get("score") or {}
                accuracy = candidate_score.get("accuracy", 0.0)
                prompt_length = candidate_score.get("prompt_length", 0)
                payload_kind = cand.get("payload_kind", "unknown")

                # Try score.instance_scores first, then cand.instance_scores (explicit check)
                instance_scores = (
                    candidate_score.get('instance_scores')
                    if 'instance_scores' in candidate_score
                    else cand.get('instance_scores')
                )
                n_eval = len(instance_scores) if instance_scores and isinstance(instance_scores, list) else 0

                lines.append(f"[{idx+1}] Accuracy: {accuracy:.4f} | Length: {prompt_length} | Type: {payload_kind} | N: {n_eval}")
                lines.append("-" * 80)

                obj = cand.get("object")
                if obj and isinstance(obj, dict) and payload_kind == "transformation":
                    # For transformations, text_replacements are nested in data
                    data_obj = obj.get("data", {})
                    replacement_lines = _format_text_replacements(data_obj)
                    lines.extend(replacement_lines)
                lines.append("")

        # Add MIPRO top-K candidates
        if mipro_topk_candidates and isinstance(mipro_topk_candidates, list):
            # Sort by rank
            mipro_topk_candidates.sort(key=lambda x: x.get("rank", 999))
            lines.append("=" * 80)
            lines.append(f"🎯 TOP-K CANDIDATES ({len(mipro_topk_candidates)})")
            lines.append("=" * 80)
            lines.append("")

            for cand in mipro_topk_candidates:
                rank = cand.get("rank", 0)
                train_score = cand.get("train_score", 0.0)
                test_score = cand.get("test_score", 0.0)
                lift_abs = cand.get("lift_absolute")
                lift_pct = cand.get("lift_percent")
                instruction_text = cand.get("instruction_text", "")
                instruction_lines = cand.get("instruction_lines", [])
                demo_indices = cand.get("demo_indices", [])
                instruction_indices = cand.get("instruction_indices", [])
                stage_payloads = cand.get("stage_payloads", {})
                test_per_seed = cand.get("test_per_seed", {})

                lift_str = ""
                if lift_abs is not None and lift_pct is not None:
                    lift_str = f" | Lift: {lift_abs:+.3f} ({lift_pct:+.1f}%)"

                lines.append(f"[Rank {rank}] Train: {train_score:.4f} ({train_score*100:.1f}%) | Test: {test_score:.4f} ({test_score*100:.1f}%){lift_str}")
                lines.append("-" * 80)

                # Show full instruction text (use instruction_lines if available, otherwise instruction_text)
                if instruction_lines:
                    lines.append("Instructions:")
                    for idx, instr_line in enumerate(instruction_lines, 1):
                        lines.append(f"  {idx}. {instr_line}")
                elif instruction_text:
                    # Split multi-line instructions
                    instr_parts = instruction_text.split("\n")
                    if len(instr_parts) > 1:
                        lines.append("Instructions:")
                        for idx, part in enumerate(instr_parts, 1):
                            if part.strip():
                                lines.append(f"  {idx}. {part.strip()}")
                    else:
                        lines.append(f"Instruction: {instruction_text}")

                if instruction_indices:
                    lines.append(f"Instruction Indices: {instruction_indices}")
                if demo_indices:
                    lines.append(f"Demo Indices: {demo_indices}")

                # Show per-stage breakdown if available
                if stage_payloads:
                    lines.append("Per-stage breakdown:")
                    for stage_id, payload in stage_payloads.items():
                        if isinstance(payload, dict):
                            instr_ids = payload.get("instruction_indices", [])
                            demo_ids = payload.get("demo_indices", [])
                            module_id = payload.get("module_id", "unknown")
                            lines.append(f"  [{module_id}/{stage_id}] instr_ids={instr_ids} demo_ids={demo_ids}")

                # Show test per-seed scores if available
                if test_per_seed:
                    seed_scores = []
                    for seed, score in sorted(test_per_seed.items()):
                        seed_scores.append(f"{seed}: {score:.2f}")
                    if seed_scores:
                        lines.append(f"Test per-seed: {', '.join(seed_scores)}")

                lines.append("")

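        # Illustration (hypothetical candidate, not from a real job): in the TOP-K section
        # above, rank=1, train_score=0.82, test_score=0.79, lift_absolute=0.07, lift_percent=9.7
        # renders the header line as
        #   "[Rank 1] Train: 0.8200 (82.0%) | Test: 0.7900 (79.0%) | Lift: +0.070 (+9.7%)".
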
        # Add all proposal candidates
        if attempted_candidates and isinstance(attempted_candidates, list):
            lines.append("=" * 80)
            lines.append(f"💡 ALL PROPOSAL CANDIDATES ({len(attempted_candidates)})")
            lines.append("=" * 80)
            lines.append("")

            for idx, cand in enumerate(attempted_candidates):
                if not isinstance(cand, dict):
                    continue
                accuracy = cand.get('accuracy', 0.0)
                prompt_length = cand.get('prompt_length', 0)
                tool_rate = cand.get('tool_call_rate', 0.0)
                instance_scores = cand.get('instance_scores', [])
                n_eval = len(instance_scores) if instance_scores else 0

                lines.append(f"[{idx+1}] Accuracy: {accuracy:.4f} | Length: {prompt_length} | Tool Rate: {tool_rate:.2f} | N: {n_eval}")
                lines.append("-" * 80)

                obj = cand.get("object")
                if obj and isinstance(obj, dict):
                    # For proposals, text_replacements are at top level of object
                    replacement_lines = _format_text_replacements(obj)
                    lines.extend(replacement_lines)
                lines.append("")

        # Add proposed instructions section (MIPRO)
        if proposed_instructions and isinstance(proposed_instructions, list):
            lines.append("=" * 80)
            lines.append(f"💡 PROPOSED INSTRUCTIONS ({len(proposed_instructions)})")
            lines.append("=" * 80)
            lines.append("")

            for idx, instr in enumerate(proposed_instructions):
                if not isinstance(instr, dict):
                    continue
                iteration = instr.get("iteration", "?")
                stage_id = instr.get("stage_id", "?")
                module_id = instr.get("module_id", "?")
                instruction_id = instr.get("instruction_id", "?")
                instruction_text = instr.get("instruction_text", "")
                instruction_lines = instr.get("instruction_lines", [])
                demo_indices = instr.get("demo_indices", [])

                lines.append(f"[{idx+1}] Iteration {iteration} | Stage: {stage_id} | Module: {module_id} | ID: {instruction_id}")
                if demo_indices:
                    lines.append(f"Demo Indices: {demo_indices}")
                lines.append("-" * 80)

                # Show instruction text (use instruction_lines if available, otherwise instruction_text)
                if instruction_lines:
                    for line_idx, line in enumerate(instruction_lines, 1):
                        if line.strip():
                            lines.append(f"  {line_idx}. {line.strip()}")
                elif instruction_text:
                    # Split multi-line instructions
                    instr_parts = instruction_text.split("\n")
                    if len(instr_parts) > 1:
                        for line_idx, part in enumerate(instr_parts, 1):
                            if part.strip():
                                lines.append(f"  {line_idx}. {part.strip()}")
                    else:
                        lines.append(f"  {instruction_text}")

                lines.append("")

        # Add proposed transformations section (GEPA)
        if proposed_transformations and isinstance(proposed_transformations, list):
            lines.append("=" * 80)
            lines.append(f"🧬 PROPOSED TRANSFORMATIONS ({len(proposed_transformations)})")
            lines.append("=" * 80)
            lines.append("")

            for idx, trans in enumerate(proposed_transformations):
                if not isinstance(trans, dict):
                    continue
                generation = trans.get("generation", "?")
                mutation_type = trans.get("mutation_type", "?")
                operator = trans.get("operator", "?")
                transformation_id = trans.get("transformation_id", "?")
                parent_id = trans.get("parent_id", "?")
                transformation_text = trans.get("transformation_text", "")
                transformation_dict = trans.get("transformation_dict", {})

                lines.append(f"[{idx+1}] Generation {generation} | Type: {mutation_type} | Operator: {operator}")
                lines.append(f"Transformation ID: {transformation_id} | Parent ID: {parent_id}")
                lines.append("-" * 80)

                # Show transformation text
                if transformation_text:
                    lines.append("Transformation Text:")
                    lines.append(f"  {transformation_text}")

                # Show transformation dict details if available
                if transformation_dict:
                    text_replacements = transformation_dict.get("text_replacements", [])
                    if text_replacements:
                        lines.append("Text Replacements:")
                        for repl_idx, repl in enumerate(text_replacements, 1):
                            if isinstance(repl, dict):
                                apply_to = repl.get("apply_to_role", "unknown")
                                old_text = repl.get("old_text", "")[:100]
                                new_text = repl.get("new_text", "")[:200]
                                lines.append(f"  {repl_idx}. [{apply_to}]")
                                if old_text:
                                    lines.append(f"     Old: {old_text}...")
                                if new_text:
                                    lines.append(f"     New: {new_text}...")

                lines.append("")

        # Add summary table and chart before END OF REPORT
        lines.append("")
        lines.append("=" * 80)
        lines.append("FINAL SUMMARY")
        lines.append("=" * 80)

        # Generate summary table text (reuse summary.py logic)
        try:
            from .summary import _generate_summary_text
            # Extract optimization curve from events if available
            optimization_curve = None
            # Try to extract curve from trial events
            trial_scores = []
            for event in events:
                if isinstance(event, dict):
                    event_type = event.get("type", "")
                    if event_type in ("prompt.learning.trial.complete", "mipro.new_incumbent"):
                        data = event.get("data", {})
                        trial_num = data.get("trial") or data.get("trial_num")
                        score = data.get("score") or data.get("minibatch_score")
                        if trial_num is not None and score is not None:
                            trial_scores.append((trial_num, score))

            if trial_scores:
                # Build optimization curve (best score so far at each trial)
                best_so_far = {}
                for trial_num, score in sorted(trial_scores):
                    if trial_num not in best_so_far or score > best_so_far[trial_num]:
                        best_so_far[trial_num] = score
                optimization_curve = sorted(best_so_far.items())

            summary_text, curve_text = _generate_summary_text(
                events=events,
                algorithm=algorithm_name.lower() if algorithm_name else None,
                optimization_curve=optimization_curve,
            )
            if summary_text:
                lines.append(summary_text)
            if curve_text:
                lines.append("")
                lines.append(curve_text)
        except Exception as e:
            lines.append(f"⚠️ Could not generate summary: {e}")

        lines.append("=" * 80)
        lines.append("END OF REPORT")
        lines.append("=" * 80)

        # Determine save location
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

        # Use results_folder from config (create if it doesn't exist)
        output_dir = results_folder
        output_dir.mkdir(parents=True, exist_ok=True)

        # Use algorithm-specific filename
        algorithm_prefix = algorithm_name.lower() if algorithm_name else "prompt_learning"
        output_file = output_dir / f"{algorithm_prefix}_results_{job_id}_{timestamp}.txt"

        with open(output_file, "w", encoding="utf-8") as f:
            f.write("\n".join(lines))

        click.echo(f"\n📄 Results saved locally to: {output_file}")

        # Also save verbose log file with all events (append summary if log was streamed live)
        log_file = output_dir / f"{algorithm_prefix}_log_{job_id}_{timestamp}.log"
        append_summary = log_file.exists()  # If log file exists, it was streamed live, so just append summary
        _save_verbose_log_file(events, log_file, algorithm_name, job_id, append_summary=append_summary)
        click.echo(f"📋 Verbose log saved locally to: {log_file}")

    except (PermissionError, OSError) as e:
        click.echo(f"⚠️ Could not save results file locally: {e}")
    except Exception as e:
        click.echo(f"⚠️ Unexpected error saving results file: {e}")


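The curve extraction in the FINAL SUMMARY block above reduces trial events to one point per trial; note that the loop keeps the per-trial maximum rather than a running best-so-far, despite its comment. A minimal standalone rerun of that reduction with made-up trial data (purely illustrative, not output from a real job):

# Hypothetical (trial_num, score) pairs, mimicking the fields read from
# "prompt.learning.trial.complete" / "mipro.new_incumbent" events.
trial_scores = [(1, 0.55), (2, 0.61), (2, 0.58), (3, 0.60)]

best_so_far: dict[int, float] = {}
for trial_num, score in sorted(trial_scores):
    # Keep the highest score seen for each trial number.
    if trial_num not in best_so_far or score > best_so_far[trial_num]:
        best_so_far[trial_num] = score

optimization_curve = sorted(best_so_far.items())
print(optimization_curve)  # [(1, 0.55), (2, 0.61), (3, 0.6)]
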
def handle_prompt_learning(
    *,
    cfg_path: Path,
    backend_base: str,
    synth_key: str,
    task_url_override: str | None,
    allow_experimental: bool | None,
    dry_run: bool,
    poll: bool,
    poll_timeout: float,
    poll_interval: float,
    stream_format: str,
    display_config: dict[str, Any] | None = None,
    tui: bool = False,
    show_curve: bool = True,
    verbose_summary: bool = True,
) -> None:
    """Handle prompt learning job creation (MIPRO or GEPA)."""
    ctx: dict[str, Any] = {
        "cfg_path": str(cfg_path),
        "backend_base": backend_base,
        "task_url_override": task_url_override,
        "poll": poll,
    }
    log_info("handle_prompt_learning invoked", ctx=ctx)
    env_key = get_required_value(
        "environment_api_key",
        env_value=os.environ.get("ENVIRONMENT_API_KEY"),
    )
    os.environ["ENVIRONMENT_API_KEY"] = env_key

    overrides: dict[str, Any] = {
        "backend": backend_base,
        "task_url": task_url_override,
    }

    build = build_prompt_learning_payload(
        config_path=cfg_path,
        task_url=task_url_override,
        overrides=overrides,
        allow_experimental=allow_experimental,
    )

    # Assertion: Validate task app URL is reachable from backend perspective
    # If backend is localhost and task app is localhost, they should be able to communicate
    task_app_url = build.task_url or ""
    if backend_base.startswith("http://localhost") or backend_base.startswith("http://127.0.0.1"):
        if task_app_url.startswith("http://localhost") or task_app_url.startswith("http://127.0.0.1"):
            # Both are local - this should work
            pass
        else:
            click.echo(f"⚠️ WARNING: Backend is local ({backend_base}) but task app is remote ({task_app_url})")
            click.echo("   The backend may not be able to reach the task app. Consider using a tunnel or local task app.")

    click.echo("Performing task app health check…")
    click.echo(f"Task app URL: {build.task_url}")
    click.echo("⏳ Checking /health endpoint (timeout: 10s)...")
    health = check_local_api_health(build.task_url, env_key, timeout=10.0)
    if not health.ok:
        click.echo(f"❌ Task app health check failed: {health.detail}")
        click.echo(f"   Health status: {health.health_status}")
        click.echo(f"   Task info status: {health.task_info_status}")
        click.echo("💡 Troubleshooting:")
        click.echo("   1. Ensure the task app is running: lsof -i :8102")
        click.echo("   2. Test manually: curl -v http://127.0.0.1:8102/health")
        click.echo("   3. Check task app logs for errors")
        click.echo("   4. Restart the task app if it's hung")
        raise click.ClickException("Aborting due to failing health check")
    else:
        click.echo("Task app healthy")

    # Ensure backend_base has /api prefix
    if not backend_base.endswith("/api"):
        backend_base = ensure_api_base(backend_base)

    # Assertion: Validate backend URL before making request
    if not backend_base.startswith("http"):
        raise click.ClickException(
            f"Invalid backend URL: {backend_base}. Must start with http:// or https://"
        )

    create_url = f"{backend_base}/prompt-learning/online/jobs"
    headers = {"Authorization": f"Bearer {synth_key}", "Content-Type": "application/json"}

    click.echo(f"POST {create_url}")
    click.echo("Payload preview:\n" + preview_json(build.payload, limit=800))

    # Assertion: If using local backend, verify it's actually localhost
    if (
        os.getenv("BACKEND_BASE_URL")
        and "localhost" in os.getenv("BACKEND_BASE_URL", "").lower()
        and "localhost" not in backend_base.lower()
        and "127.0.0.1" not in backend_base
    ):
        raise click.ClickException(
            f"BACKEND_BASE_URL was set to localhost but backend_base resolved to {backend_base}. "
            f"This indicates the environment variable is not being respected."
        )

    # Increase timeout for job creation (can take longer due to validation checks)
    resp = http_post(create_url, headers=headers, json_body=build.payload, timeout=180.0)
    try:
        js = resp.json()
    except json.JSONDecodeError as e:
        click.echo(f"⚠️ Failed to parse JSON response: {e}")
        js = {"status": resp.status_code, "text": resp.text[:400]}
    click.echo(f"Response {resp.status_code}: {preview_json(js, limit=400)}")
    if resp.status_code not in (200, 201):
        raise click.ClickException("Job creation failed")
    job_id = js.get("job_id") or js.get("id")
    if not job_id:
        raise click.ClickException("Response missing job id")

    if not poll:
        click.echo(f"Created job {job_id} (polling disabled)")
        return

    algorithm = str(build.payload.get("algorithm") or "").lower()
    metric_names: set[str] | None = None
    if algorithm == "gepa":
        metric_names = {"gepa.transformation.mean_score"}

    chart_mode = stream_format == "chart" and algorithm == "gepa"
    if stream_format == "chart" and not chart_mode:
        click.echo("Chart streaming is only available for GEPA jobs; showing textual updates instead.")

    # Prepare log file path for real-time streaming
    results_folder = parse_results_folder(cfg_path)
    from datetime import datetime
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    algorithm_prefix = algorithm.lower() if algorithm else "prompt_learning"
    log_file = results_folder / f"{algorithm_prefix}_log_{job_id}_{timestamp}.log"

    # Write initial streaming message to log file if handler will be created
    if not chart_mode:
        try:
            log_file.parent.mkdir(parents=True, exist_ok=True)
            with open(log_file, "a", encoding="utf-8") as f:
                f.write("\n=== Streaming Job Progress ===\n")
        except Exception:
            pass  # Continue even if log file can't be written

    click.echo("\n=== Streaming Job Progress ===")

    # Create appropriate handler based on algorithm
    if algorithm == "gepa":
        if chart_mode:
            config = StreamConfig(
                enabled_streams={StreamType.STATUS, StreamType.EVENTS, StreamType.METRICS},
                event_types={
                    "prompt.learning.progress",
                    "prompt.learning.gepa.start",
                    "prompt.learning.gepa.complete",
                },
                metric_names=metric_names,
            )
            handlers = [LossCurveHandler()]
            click.echo("Using live loss chart (metric=gepa.transformation.mean_score)")
        else:
            config = StreamConfig(
                enabled_streams={StreamType.STATUS, StreamType.EVENTS, StreamType.METRICS, StreamType.TIMELINE},
                metric_names=metric_names,
                max_events_per_poll=500,  # Capture more events per poll
                deduplicate=True,  # Still deduplicate but capture more
                # Don't filter events - show all of them
                event_types=None,  # No whitelist - show all event types
                event_types_exclude=None,  # No blacklist - show all events
                event_levels=None,  # Show all levels
            )
            # Use PromptLearningHandler for enhanced event handling
            handler = PromptLearningHandler(
                show_trial_results=display_config.get("show_trial_results", True) if display_config else True,
                show_transformations=display_config.get("show_transformations", False) if display_config else False,
                show_validation=display_config.get("show_validation", True) if display_config else True,
                max_tokens=display_config.get("max_tokens") if display_config else None,
                max_time_seconds=display_config.get("max_time_seconds") if display_config else None,
                max_rollouts=display_config.get("max_rollouts") if display_config else None,
                log_file=log_file,
            )
            handlers = [handler]
    else:
        # Use PromptLearningHandler for MIPRO (same as GEPA)
        config = StreamConfig(
            enabled_streams={StreamType.STATUS, StreamType.EVENTS, StreamType.METRICS, StreamType.TIMELINE},
            metric_names=metric_names,
            max_events_per_poll=500,  # Capture more events per poll
            deduplicate=True,  # Still deduplicate but capture more
            # Don't filter events - show all of them
            event_types=None,  # No whitelist - show all event types
            event_types_exclude=None,  # No blacklist - show all events
            event_levels=None,  # Show all levels
        )
        handler = PromptLearningHandler(
            show_trial_results=display_config.get("show_trial_results", True) if display_config else True,
            show_transformations=display_config.get("show_transformations", False) if display_config else False,
            show_validation=display_config.get("show_validation", True) if display_config else True,
            max_tokens=display_config.get("max_tokens") if display_config else None,
            max_time_seconds=display_config.get("max_time_seconds") if display_config else None,
            max_rollouts=display_config.get("max_rollouts") if display_config else None,
            log_file=log_file,
        )
        handlers = [handler]

    streamer = JobStreamer(
        base_url=backend_base,
        api_key=synth_key,
        job_id=job_id,
        endpoints=StreamEndpoints.prompt_learning(job_id),
        config=config,
        handlers=handlers,
        interval_seconds=poll_interval,
        timeout_seconds=poll_timeout,
    )
    final_status = asyncio.run(streamer.stream_until_terminal())

    # Write final status to log file if handler has one
    if isinstance(handlers[0], PromptLearningHandler) and handlers[0]._log_file_handle:
        handlers[0]._write_log(f"Final status: {final_status.get('status', 'unknown')}")
        handlers[0]._write_log(preview_json(final_status, limit=600))

    click.echo(f"Final status: {final_status.get('status', 'unknown')}")
    click.echo(preview_json(final_status, limit=600))

    # Display final summary for GEPA/MIPRO jobs if requested
    if verbose_summary and algorithm in ("gepa", "mipro"):
        optimization_curve = None
        if isinstance(handlers[0], PromptLearningHandler):
            optimization_curve = handlers[0].optimization_curve

        from .summary import display_prompt_learning_summary
        # Pass log_writer if handler has one
        log_writer = None
        if isinstance(handlers[0], PromptLearningHandler) and handlers[0]._log_file_handle:
            log_writer = handlers[0]._write_log
        display_prompt_learning_summary(
            job_id=job_id,
            backend_base=backend_base,
            api_key=synth_key,
            optimization_curve=optimization_curve,
            show_curve=show_curve,
            algorithm=algorithm,
            log_writer=log_writer,
        )

    # Save results file locally
    # Parse and validate results_folder from config (already done above, but ensure it's available)
    if 'results_folder' not in locals():
        results_folder = parse_results_folder(cfg_path)

    # Close log file if handler has one (flush is already called by streamer, but ensure it's closed)
    if isinstance(handlers[0], PromptLearningHandler) and handlers[0]._log_file_handle:
        handlers[0].flush()

    _save_prompt_learning_results_locally(
        backend_base=backend_base,
        api_key=synth_key,
        job_id=job_id,
        config_path=cfg_path,
        results_folder=results_folder,
    )


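check_local_api_health is imported from elsewhere in the package and its implementation is not part of this diff. Purely as a sketch of what such a probe amounts to, a hand-rolled equivalent might look like the following; the /health route matches what the command echoes above, but the header used to pass env_key is an assumption made only for illustration:

import httpx


def probe_task_app_health(task_url: str, env_key: str, timeout: float = 10.0) -> bool:
    """Minimal stand-in for a task app health probe (illustrative only)."""
    url = task_url.rstrip("/") + "/health"
    try:
        # Header name below is an assumption for this sketch, not the package's confirmed convention.
        resp = httpx.get(url, headers={"X-API-Key": env_key}, timeout=timeout)
        return resp.status_code == 200
    except httpx.HTTPError:
        return False

The real helper also reports detail, health_status, and task_info_status fields, which the failure path above echoes before aborting.
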
def register(cli: click.Group) -> None:
    cli.add_command(train_command)
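register is the hook the CLI package calls to attach the train command to its Click group. A minimal sketch of the caller side, with an illustrative stand-in group (the exact import path is assumed, not shown in this diff):

import click


@click.group()
def cli() -> None:
    """Illustrative root group standing in for the synth-ai CLI entry point."""


# At startup the CLI would run something equivalent to:
#     from synth_ai.cli.commands.train import register  # import path assumed
#     register(cli)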