synth-ai 0.2.14__py3-none-any.whl → 0.2.16__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of synth-ai might be problematic.
- examples/README.md +1 -0
- examples/multi_step/SFT_README.md +147 -0
- examples/multi_step/configs/crafter_rl_stepwise_hosted_judge.toml +9 -9
- examples/multi_step/configs/crafter_sft_qwen30b_lora.toml +62 -0
- examples/multi_step/convert_traces_to_sft.py +84 -0
- examples/multi_step/run_sft_qwen30b.sh +45 -0
- examples/qwen_coder/configs/coder_lora_30b.toml +2 -1
- examples/qwen_coder/configs/coder_lora_4b.toml +2 -1
- examples/qwen_coder/configs/coder_lora_small.toml +2 -1
- examples/qwen_vl/BUGS_AND_FIXES.md +232 -0
- examples/qwen_vl/IMAGE_VALIDATION_COMPLETE.md +271 -0
- examples/qwen_vl/IMAGE_VALIDATION_SUMMARY.md +260 -0
- examples/qwen_vl/INFERENCE_SFT_TESTS.md +412 -0
- examples/qwen_vl/NEXT_STEPS_2B.md +325 -0
- examples/qwen_vl/QUICKSTART.md +327 -0
- examples/qwen_vl/QUICKSTART_RL_VISION.md +110 -0
- examples/qwen_vl/README.md +154 -0
- examples/qwen_vl/RL_VISION_COMPLETE.md +475 -0
- examples/qwen_vl/RL_VISION_TESTING.md +333 -0
- examples/qwen_vl/SDK_VISION_INTEGRATION.md +328 -0
- examples/qwen_vl/SETUP_COMPLETE.md +275 -0
- examples/qwen_vl/VISION_TESTS_COMPLETE.md +490 -0
- examples/qwen_vl/VLM_PIPELINE_COMPLETE.md +242 -0
- examples/qwen_vl/__init__.py +2 -0
- examples/qwen_vl/collect_data_via_cli.md +423 -0
- examples/qwen_vl/collect_vision_traces.py +368 -0
- examples/qwen_vl/configs/crafter_rl_vision_qwen3vl4b.toml +127 -0
- examples/qwen_vl/configs/crafter_vlm_sft_example.toml +60 -0
- examples/qwen_vl/configs/eval_gpt4o_mini_vision.toml +43 -0
- examples/qwen_vl/configs/eval_gpt4o_vision_proper.toml +29 -0
- examples/qwen_vl/configs/eval_gpt5nano_vision.toml +45 -0
- examples/qwen_vl/configs/eval_qwen2vl_vision.toml +44 -0
- examples/qwen_vl/configs/filter_qwen2vl_sft.toml +50 -0
- examples/qwen_vl/configs/filter_vision_sft.toml +53 -0
- examples/qwen_vl/configs/filter_vision_test.toml +8 -0
- examples/qwen_vl/configs/sft_qwen3_vl_2b_test.toml +54 -0
- examples/qwen_vl/crafter_gpt5nano_agent.py +308 -0
- examples/qwen_vl/crafter_qwen_vl_agent.py +300 -0
- examples/qwen_vl/run_vision_comparison.sh +62 -0
- examples/qwen_vl/run_vision_sft_pipeline.sh +175 -0
- examples/qwen_vl/test_image_validation.py +201 -0
- examples/qwen_vl/test_sft_vision_data.py +110 -0
- examples/rl/README.md +1 -1
- examples/rl/configs/eval_base_qwen.toml +17 -0
- examples/rl/configs/eval_rl_qwen.toml +13 -0
- examples/rl/configs/rl_from_base_qwen.toml +37 -0
- examples/rl/configs/rl_from_base_qwen17.toml +76 -0
- examples/rl/configs/rl_from_ft_qwen.toml +37 -0
- examples/rl/run_eval.py +436 -0
- examples/rl/run_rl_and_save.py +111 -0
- examples/rl/task_app/README.md +22 -0
- examples/rl/task_app/math_single_step.py +990 -0
- examples/rl/task_app/math_task_app.py +111 -0
- examples/sft/README.md +5 -5
- examples/sft/configs/crafter_fft_qwen0p6b.toml +4 -2
- examples/sft/configs/crafter_lora_qwen0p6b.toml +4 -3
- examples/sft/evaluate.py +2 -4
- examples/sft/export_dataset.py +7 -4
- examples/swe/task_app/README.md +1 -1
- examples/swe/task_app/grpo_swe_mini.py +0 -1
- examples/swe/task_app/grpo_swe_mini_task_app.py +0 -12
- examples/swe/task_app/hosted/envs/mini_swe/environment.py +13 -13
- examples/swe/task_app/hosted/policy_routes.py +0 -2
- examples/swe/task_app/hosted/rollout.py +0 -8
- examples/task_apps/crafter/task_app/grpo_crafter.py +4 -7
- examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/policy.py +59 -1
- examples/task_apps/crafter/task_app/synth_envs_hosted/inference/openai_client.py +30 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/policy_routes.py +62 -31
- examples/task_apps/crafter/task_app/synth_envs_hosted/rollout.py +16 -14
- examples/task_apps/enron/__init__.py +1 -0
- examples/vlm/README.md +3 -3
- examples/vlm/configs/crafter_vlm_gpt4o.toml +2 -0
- examples/vlm/crafter_openai_vlm_agent.py +3 -5
- examples/vlm/filter_image_rows.py +1 -1
- examples/vlm/run_crafter_vlm_benchmark.py +2 -2
- examples/warming_up_to_rl/_utils.py +92 -0
- examples/warming_up_to_rl/analyze_trace_db.py +1 -1
- examples/warming_up_to_rl/configs/crafter_fft.toml +2 -0
- examples/warming_up_to_rl/configs/crafter_fft_4b.toml +2 -0
- examples/warming_up_to_rl/configs/eval_fft_qwen4b.toml +2 -0
- examples/warming_up_to_rl/configs/eval_groq_qwen32b.toml +2 -0
- examples/warming_up_to_rl/configs/eval_modal_qwen4b.toml +2 -1
- examples/warming_up_to_rl/configs/rl_from_base_qwen4b.toml +2 -1
- examples/warming_up_to_rl/configs/rl_from_ft.toml +2 -0
- examples/warming_up_to_rl/export_trace_sft.py +174 -60
- examples/warming_up_to_rl/readme.md +63 -132
- examples/warming_up_to_rl/run_fft_and_save.py +1 -1
- examples/warming_up_to_rl/run_rl_and_save.py +1 -1
- examples/warming_up_to_rl/task_app/README.md +42 -0
- examples/warming_up_to_rl/task_app/grpo_crafter.py +696 -0
- examples/warming_up_to_rl/task_app/grpo_crafter_task_app.py +135 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/README.md +173 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/__init__.py +5 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/branching.py +143 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/environment_routes.py +1226 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/__init__.py +1 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/__init__.py +6 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/app.py +1 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/environment.py +522 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/policy.py +478 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/react_agent.py +108 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/shared.py +305 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/tools.py +47 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/hosted_app.py +204 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/inference/__init__.py +5 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/inference/openai_client.py +618 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/main.py +100 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/policy_routes.py +1081 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/registry.py +195 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/rollout.py +1861 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/storage/__init__.py +5 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/storage/volume.py +211 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/test_agents.py +161 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/test_service.py +137 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/utils.py +62 -0
- synth_ai/__init__.py +44 -30
- synth_ai/_utils/__init__.py +47 -0
- synth_ai/_utils/base_url.py +10 -0
- synth_ai/_utils/http.py +10 -0
- synth_ai/_utils/prompts.py +10 -0
- synth_ai/_utils/task_app_state.py +12 -0
- synth_ai/_utils/user_config.py +10 -0
- synth_ai/api/models/supported.py +144 -7
- synth_ai/api/train/__init__.py +13 -1
- synth_ai/api/train/cli.py +30 -7
- synth_ai/api/train/config_finder.py +18 -11
- synth_ai/api/train/env_resolver.py +13 -10
- synth_ai/cli/__init__.py +62 -78
- synth_ai/cli/_modal_wrapper.py +7 -5
- synth_ai/cli/_typer_patch.py +0 -2
- synth_ai/cli/_validate_task_app.py +22 -4
- synth_ai/cli/legacy_root_backup.py +3 -1
- synth_ai/cli/lib/__init__.py +10 -0
- synth_ai/cli/lib/task_app_discovery.py +7 -0
- synth_ai/cli/lib/task_app_env.py +518 -0
- synth_ai/cli/recent.py +2 -1
- synth_ai/cli/setup.py +266 -0
- synth_ai/cli/status.py +1 -1
- synth_ai/cli/task_app_deploy.py +16 -0
- synth_ai/cli/task_app_list.py +25 -0
- synth_ai/cli/task_app_modal_serve.py +16 -0
- synth_ai/cli/task_app_serve.py +18 -0
- synth_ai/cli/task_apps.py +71 -31
- synth_ai/cli/traces.py +1 -1
- synth_ai/cli/train.py +18 -0
- synth_ai/cli/tui.py +7 -2
- synth_ai/cli/turso.py +1 -1
- synth_ai/cli/watch.py +1 -1
- synth_ai/demos/__init__.py +10 -0
- synth_ai/demos/core/__init__.py +28 -1
- synth_ai/demos/crafter/__init__.py +1 -0
- synth_ai/demos/crafter/crafter_fft_4b.toml +55 -0
- synth_ai/demos/crafter/grpo_crafter_task_app.py +185 -0
- synth_ai/demos/crafter/rl_from_base_qwen4b.toml +74 -0
- synth_ai/demos/demo_registry.py +176 -0
- synth_ai/demos/math/__init__.py +1 -0
- synth_ai/demos/math/_common.py +16 -0
- synth_ai/demos/math/app.py +38 -0
- synth_ai/demos/math/config.toml +76 -0
- synth_ai/demos/math/deploy_modal.py +54 -0
- synth_ai/demos/math/modal_task_app.py +702 -0
- synth_ai/demos/math/task_app_entry.py +51 -0
- synth_ai/environments/environment/core.py +7 -1
- synth_ai/environments/examples/bandit/engine.py +0 -1
- synth_ai/environments/examples/bandit/environment.py +0 -1
- synth_ai/environments/examples/wordle/environment.py +0 -1
- synth_ai/evals/base.py +16 -5
- synth_ai/evals/client.py +1 -1
- synth_ai/inference/client.py +1 -1
- synth_ai/judge_schemas.py +8 -8
- synth_ai/learning/client.py +1 -1
- synth_ai/learning/health.py +1 -1
- synth_ai/learning/jobs.py +1 -1
- synth_ai/learning/rl/client.py +1 -1
- synth_ai/learning/rl/env_keys.py +1 -1
- synth_ai/learning/rl/secrets.py +1 -1
- synth_ai/learning/sft/client.py +1 -1
- synth_ai/learning/sft/data.py +407 -4
- synth_ai/learning/validators.py +4 -1
- synth_ai/task/apps/__init__.py +4 -2
- synth_ai/task/config.py +6 -4
- synth_ai/task/rubrics/__init__.py +1 -2
- synth_ai/task/rubrics/loaders.py +14 -10
- synth_ai/task/rubrics.py +219 -0
- synth_ai/task/trace_correlation_helpers.py +24 -11
- synth_ai/task/tracing_utils.py +14 -3
- synth_ai/task/validators.py +2 -3
- synth_ai/tracing_v3/abstractions.py +3 -3
- synth_ai/tracing_v3/config.py +15 -13
- synth_ai/tracing_v3/constants.py +21 -0
- synth_ai/tracing_v3/db_config.py +3 -1
- synth_ai/tracing_v3/decorators.py +10 -7
- synth_ai/tracing_v3/llm_call_record_helpers.py +5 -5
- synth_ai/tracing_v3/session_tracer.py +7 -7
- synth_ai/tracing_v3/storage/base.py +29 -29
- synth_ai/tracing_v3/storage/config.py +3 -3
- synth_ai/tracing_v3/turso/daemon.py +8 -9
- synth_ai/tracing_v3/turso/native_manager.py +80 -72
- synth_ai/tracing_v3/utils.py +2 -2
- synth_ai/tui/cli/query_experiments.py +4 -4
- synth_ai/tui/cli/query_experiments_v3.py +4 -4
- synth_ai/tui/dashboard.py +14 -9
- synth_ai/utils/__init__.py +101 -0
- synth_ai/utils/base_url.py +94 -0
- synth_ai/utils/cli.py +131 -0
- synth_ai/utils/env.py +287 -0
- synth_ai/utils/http.py +169 -0
- synth_ai/utils/modal.py +308 -0
- synth_ai/utils/process.py +212 -0
- synth_ai/utils/prompts.py +39 -0
- synth_ai/utils/sqld.py +122 -0
- synth_ai/utils/task_app_discovery.py +882 -0
- synth_ai/utils/task_app_env.py +186 -0
- synth_ai/utils/task_app_state.py +318 -0
- synth_ai/utils/user_config.py +137 -0
- synth_ai/v0/config/__init__.py +1 -5
- synth_ai/v0/config/base_url.py +1 -7
- synth_ai/v0/tracing/config.py +1 -1
- synth_ai/v0/tracing/decorators.py +1 -1
- synth_ai/v0/tracing/upload.py +1 -1
- synth_ai/v0/tracing_v1/config.py +1 -1
- synth_ai/v0/tracing_v1/decorators.py +1 -1
- synth_ai/v0/tracing_v1/upload.py +1 -1
- {synth_ai-0.2.14.dist-info → synth_ai-0.2.16.dist-info}/METADATA +85 -31
- {synth_ai-0.2.14.dist-info → synth_ai-0.2.16.dist-info}/RECORD +229 -117
- synth_ai/cli/man.py +0 -106
- synth_ai/compound/cais.py +0 -0
- synth_ai/core/experiment.py +0 -13
- synth_ai/core/system.py +0 -15
- synth_ai/demo_registry.py +0 -295
- synth_ai/handshake.py +0 -109
- synth_ai/http.py +0 -26
- {synth_ai-0.2.14.dist-info → synth_ai-0.2.16.dist-info}/WHEEL +0 -0
- {synth_ai-0.2.14.dist-info → synth_ai-0.2.16.dist-info}/entry_points.txt +0 -0
- {synth_ai-0.2.14.dist-info → synth_ai-0.2.16.dist-info}/licenses/LICENSE +0 -0
- {synth_ai-0.2.14.dist-info → synth_ai-0.2.16.dist-info}/top_level.txt +0 -0
synth_ai/_utils/__init__.py ADDED

@@ -0,0 +1,47 @@
+"""
+Compatibility shims for legacy `synth_ai._utils.*` imports.
+
+The modern codebase exposes these helpers under ``synth_ai.utils``. These
+modules re-export the public symbols so existing downstream code (and our own
+older examples/tests) continue to work without modification.
+"""
+
+from __future__ import annotations
+
+from importlib import import_module
+from types import ModuleType
+
+_MAPPING = {
+    "base_url": "synth_ai.utils.base_url",
+    "http": "synth_ai.utils.http",
+    "prompts": "synth_ai.utils.prompts",
+    "task_app_state": "synth_ai.utils.task_app_state",
+    "user_config": "synth_ai.utils.user_config",
+}
+
+__all__ = sorted(_MAPPING.keys())
+
+
+def __getattr__(name: str) -> ModuleType:
+    target = _MAPPING.get(name)
+    if not target:
+        raise AttributeError(f"module 'synth_ai._utils' has no attribute '{name}'")
+    module = import_module(target)
+    globals()[name] = module
+    return module
+
+
+def __dir__() -> list[str]:
+    return sorted(set(globals()) | set(__all__))
+
+
+def _export(module_name: str) -> None:
+    module = import_module(_MAPPING[module_name])
+    globals().setdefault(module_name, module)
+    if hasattr(module, "__all__"):
+        for attr in module.__all__:  # type: ignore[attr-defined]
+            globals().setdefault(attr, getattr(module, attr))
+
+
+for _name in __all__:
+    _export(_name)

synth_ai/_utils/base_url.py ADDED

@@ -0,0 +1,10 @@
+from __future__ import annotations
+
+from synth_ai.utils.base_url import *  # noqa: F401,F403
+
+try:
+    from synth_ai.utils.base_url import __all__ as __wrapped_all__  # type: ignore[attr-defined]
+except ImportError:  # pragma: no cover - defensive
+    __wrapped_all__ = []
+
+__all__ = list(__wrapped_all__)

synth_ai/_utils/http.py ADDED

@@ -0,0 +1,10 @@
+from __future__ import annotations
+
+from synth_ai.utils.http import *  # noqa: F401,F403
+
+try:
+    from synth_ai.utils.http import __all__ as __wrapped_all__  # type: ignore[attr-defined]
+except ImportError:  # pragma: no cover - defensive
+    __wrapped_all__ = []
+
+__all__ = list(__wrapped_all__)

synth_ai/_utils/prompts.py ADDED

@@ -0,0 +1,10 @@
+from __future__ import annotations
+
+from synth_ai.utils.prompts import *  # noqa: F401,F403
+
+try:
+    from synth_ai.utils.prompts import __all__ as __wrapped_all__  # type: ignore[attr-defined]
+except ImportError:  # pragma: no cover - defensive
+    __wrapped_all__ = []
+
+__all__ = list(__wrapped_all__)

synth_ai/_utils/task_app_state.py ADDED

@@ -0,0 +1,12 @@
+from __future__ import annotations
+
+from synth_ai.utils.task_app_state import *  # noqa: F401,F403
+
+try:
+    from synth_ai.utils.task_app_state import (
+        __all__ as __wrapped_all__,  # type: ignore[attr-defined]
+    )
+except ImportError:  # pragma: no cover - defensive
+    __wrapped_all__ = []
+
+__all__ = list(__wrapped_all__)

synth_ai/_utils/user_config.py ADDED

@@ -0,0 +1,10 @@
+from __future__ import annotations
+
+from synth_ai.utils.user_config import *  # noqa: F401,F403
+
+try:
+    from synth_ai.utils.user_config import __all__ as __wrapped_all__  # type: ignore[attr-defined]
+except ImportError:  # pragma: no cover - defensive
+    __wrapped_all__ = []
+
+__all__ = list(__wrapped_all__)
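Taken together, the shims above make the legacy `synth_ai._utils` package a thin alias for `synth_ai.utils`. A minimal illustrative sketch of what downstream code can rely on (assuming the wheel is installed; nothing beyond the re-exports shown above is promised):

import synth_ai._utils as legacy_utils
import synth_ai.utils.http as modern_http

# _export() binds each mapped name at import time, so the legacy attribute
# is the new module object itself, not a copy.
assert legacy_utils.http is modern_http
# __dir__() advertises the mapped names for introspection and tab completion.
assert "http" in dir(legacy_utils)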
synth_ai/api/models/supported.py CHANGED

@@ -20,35 +20,106 @@ QWEN3_MODELS: list[str] = [
     "Qwen/Qwen3-14B",
     "Qwen/Qwen3-30B-A3B",
     "Qwen/Qwen3-32B",
-    #
+    # 2507 baseline models
+    "Qwen/Qwen3-4B-2507",
+    # Instruct variants (no <think> tags)
+    "Qwen/Qwen3-4B-Instruct-2507",
+    "Qwen/Qwen3-4B-Instruct-2507-FP8",
+    "Qwen/Qwen3-30B-A3B-Instruct-2507",
+    "Qwen/Qwen3-30B-A3B-Instruct-2507-FP8",
+    "Qwen/Qwen3-235B-A22B-Instruct-2507",
+    "Qwen/Qwen3-235B-A22B-Instruct-2507-FP8",
+    # Thinking variants (with <think> tags)
     "Qwen/Qwen3-4B-Thinking-2507",
+    "Qwen/Qwen3-4B-Thinking-2507-FP8",
     "Qwen/Qwen3-30B-A3B-Thinking-2507",
+    "Qwen/Qwen3-30B-A3B-Thinking-2507-FP8",
     "Qwen/Qwen3-235B-A22B-Thinking-2507",
+    "Qwen/Qwen3-235B-A22B-Thinking-2507-FP8",
 ]
 
 # Qwen3 Coder family (backend-supported); text-only, SFT/inference
 QWEN3_CODER_MODELS: list[str] = [
-    # Instruct variants used for coding tasks
+    # Instruct variants used for coding tasks (no <think> tags)
     "Qwen/Qwen3-Coder-30B-A3B-Instruct",
+    "Qwen/Qwen3-Coder-30B-A3B-Instruct-FP8",
     "Qwen/Qwen3-Coder-480B-A35B-Instruct",
+    "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8",
+]
+
+# Qwen3-VL family (vision-language models); multimodal, SFT/inference
+QWEN3_VL_MODELS: list[str] = [
+    # Vision-Language Models (Qwen3-VL)
+    "Qwen/Qwen3-VL-2B-Instruct",
+    "Qwen/Qwen3-VL-2B-Thinking",
+    "Qwen/Qwen3-VL-4B-Instruct",
+    "Qwen/Qwen3-VL-4B-Thinking",
+    "Qwen/Qwen3-VL-8B-Instruct",
+    "Qwen/Qwen3-VL-8B-Thinking",
+    "Qwen/Qwen3-VL-30B-A3B-Instruct",
+    "Qwen/Qwen3-VL-30B-A3B-Thinking",
+    "Qwen/Qwen3-VL-32B-Instruct",
+    "Qwen/Qwen3-VL-32B-Thinking",
+    "Qwen/Qwen3-VL-235B-A22B-Instruct",
+    "Qwen/Qwen3-VL-235B-A22B-Thinking",
 ]
 
 # Training support sets
 RL_SUPPORTED_MODELS: frozenset[str] = frozenset(
     {
+        # Legacy base models
         "Qwen/Qwen3-0.6B",
         "Qwen/Qwen3-1.7B",
         "Qwen/Qwen3-4B",
-        "Qwen/Qwen3-4B-Thinking-2507",
         "Qwen/Qwen3-8B",
         "Qwen/Qwen3-14B",
         "Qwen/Qwen3-30B-A3B",
+        # 2507 models - base
+        "Qwen/Qwen3-4B-2507",
+        # 2507 models - instruct (no <think> tags)
+        "Qwen/Qwen3-4B-Instruct-2507",
+        "Qwen/Qwen3-4B-Instruct-2507-FP8",
+        "Qwen/Qwen3-30B-A3B-Instruct-2507",
+        "Qwen/Qwen3-30B-A3B-Instruct-2507-FP8",
+        # 2507 models - thinking (with <think> tags)
+        "Qwen/Qwen3-4B-Thinking-2507",
+        "Qwen/Qwen3-4B-Thinking-2507-FP8",
         "Qwen/Qwen3-30B-A3B-Thinking-2507",
+        "Qwen/Qwen3-30B-A3B-Thinking-2507-FP8",
+        # Coder instruct models
+        "Qwen/Qwen3-Coder-30B-A3B-Instruct",
+        "Qwen/Qwen3-Coder-30B-A3B-Instruct-FP8",
+        # Vision-Language models (Qwen3-VL)
+        "Qwen/Qwen3-VL-2B-Instruct",
+        "Qwen/Qwen3-VL-2B-Thinking",
+        "Qwen/Qwen3-VL-4B-Instruct",
+        "Qwen/Qwen3-VL-4B-Thinking",
+        "Qwen/Qwen3-VL-8B-Instruct",
+        "Qwen/Qwen3-VL-8B-Thinking",
     }
 )
 
-# SFT allowlist includes core Qwen3 plus Coder
-SFT_SUPPORTED_MODELS: frozenset[str] = frozenset([*QWEN3_MODELS, *QWEN3_CODER_MODELS])
+# SFT allowlist includes core Qwen3 plus Coder and VL families
+SFT_SUPPORTED_MODELS: frozenset[str] = frozenset([*QWEN3_MODELS, *QWEN3_CODER_MODELS, *QWEN3_VL_MODELS])
+
+# Models that support <think> reasoning tags
+THINKING_MODELS: frozenset[str] = frozenset(
+    {
+        "Qwen/Qwen3-4B-Thinking-2507",
+        "Qwen/Qwen3-4B-Thinking-2507-FP8",
+        "Qwen/Qwen3-30B-A3B-Thinking-2507",
+        "Qwen/Qwen3-30B-A3B-Thinking-2507-FP8",
+        "Qwen/Qwen3-235B-A22B-Thinking-2507",
+        "Qwen/Qwen3-235B-A22B-Thinking-2507-FP8",
+        # Vision-Language Thinking models
+        "Qwen/Qwen3-VL-2B-Thinking",
+        "Qwen/Qwen3-VL-4B-Thinking",
+        "Qwen/Qwen3-VL-8B-Thinking",
+        "Qwen/Qwen3-VL-30B-A3B-Thinking",
+        "Qwen/Qwen3-VL-32B-Thinking",
+        "Qwen/Qwen3-VL-235B-A22B-Thinking",
+    }
+)
 
 # ------------------------------------------------------------------------------
 # Lifecycle classification (core vs experimental)

@@ -58,11 +129,17 @@ SFT_SUPPORTED_MODELS: frozenset[str] = frozenset([*QWEN3_MODELS, *QWEN3_CODER_MO
 _EXPERIMENTAL_DEFAULTS: frozenset[str] = frozenset(
     {
         # Larger (>= 64B) or bleeding-edge variants are experimental by default.
+        "Qwen/Qwen3-235B-A22B-Instruct-2507",
+        "Qwen/Qwen3-235B-A22B-Instruct-2507-FP8",
         "Qwen/Qwen3-235B-A22B-Thinking-2507",
+        "Qwen/Qwen3-235B-A22B-Thinking-2507-FP8",
         "Qwen/Qwen3-Coder-480B-A35B-Instruct",
+        "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8",
         # Thinking variants can fluctuate more rapidly.
         "Qwen/Qwen3-30B-A3B-Thinking-2507",
+        "Qwen/Qwen3-30B-A3B-Thinking-2507-FP8",
         "Qwen/Qwen3-4B-Thinking-2507",
+        "Qwen/Qwen3-4B-Thinking-2507-FP8",
     }
 )
 
@@ -77,8 +154,8 @@ def _parse_experimental_env() -> frozenset[str]:
 # Final experimental set (defaults ∪ optional env override)
 EXPERIMENTAL_MODELS: frozenset[str] = frozenset(_EXPERIMENTAL_DEFAULTS | _parse_experimental_env())
 
-# Build catalog entries for
-_ALL_QWEN3_IDS: list[str] = [*QWEN3_MODELS, *QWEN3_CODER_MODELS]
+# Build catalog entries for core, coder, and VL families under unified "Qwen3"
+_ALL_QWEN3_IDS: list[str] = [*QWEN3_MODELS, *QWEN3_CODER_MODELS, *QWEN3_VL_MODELS]
 
 CORE_MODELS: frozenset[str] = frozenset(m for m in _ALL_QWEN3_IDS if m not in EXPERIMENTAL_MODELS)
 
@@ -120,6 +197,7 @@ class SupportedModel:
     modalities: tuple[str, ...] = ()
     training_modes: tuple[str, ...] = ()
     lifecycle: str = "core"  # "core" | "experimental"
+    supports_thinking: bool = False  # Whether model supports <think> reasoning tags
 
     def as_dict(self) -> dict[str, object]:
         data: dict[str, object] = {
@@ -127,6 +205,7 @@ class SupportedModel:
             "family": self.family,
             "provider": self.provider,
             "lifecycle": self.lifecycle,
+            "supports_thinking": self.supports_thinking,
         }
         if self.modalities:
             data["modalities"] = list(self.modalities)
@@ -150,6 +229,7 @@ SUPPORTED_MODELS: tuple[SupportedModel, ...] = tuple(
             )
         ),
         lifecycle=("experimental" if model in EXPERIMENTAL_MODELS else "core"),
+        supports_thinking=(model in THINKING_MODELS),
     )
     for model in _ALL_QWEN3_IDS
 )
@@ -347,11 +427,66 @@ def training_modes_for_model(model_id: str) -> tuple[str, ...]:
     return model.training_modes
 
 
+def supports_thinking(model_id: str) -> bool:
+    """Return True if the model supports <think> reasoning tags.
+
+    Thinking models use structured <think>...</think> tags for reasoning.
+    Instruct models do not have these tags and should not use thinking-specific logic.
+
+    Args:
+        model_id: Model identifier (can include prefixes like 'rl:', 'fft:', etc.)
+
+    Returns:
+        True if the model supports thinking tags, False otherwise.
+        Returns False for unsupported models.
+
+    Example:
+        >>> supports_thinking("Qwen/Qwen3-4B-Thinking-2507")
+        True
+        >>> supports_thinking("Qwen/Qwen3-4B-Instruct-2507")
+        False
+        >>> supports_thinking("rl:Qwen/Qwen3-4B-Thinking-2507")
+        True
+    """
+    try:
+        canonical = ensure_supported_model(model_id, allow_finetuned_prefixes=True)
+    except UnsupportedModelError:
+        return False
+    model = _MODEL_BY_ID.get(canonical)
+    if not model:
+        return False
+    return model.supports_thinking
+
+
+def get_model_metadata(model_id: str) -> SupportedModel | None:
+    """Return the full metadata for a supported model, or None if not supported.
+
+    Args:
+        model_id: Model identifier (can include prefixes like 'rl:', 'fft:', etc.)
+
+    Returns:
+        SupportedModel instance with full metadata, or None if model is not supported.
+
+    Example:
+        >>> meta = get_model_metadata("Qwen/Qwen3-4B-Instruct-2507")
+        >>> meta.supports_thinking
+        False
+        >>> meta.training_modes
+        ('rl', 'sft')
+    """
+    try:
+        canonical = ensure_supported_model(model_id, allow_finetuned_prefixes=True)
+    except UnsupportedModelError:
+        return None
+    return _MODEL_BY_ID.get(canonical)
+
+
 __all__ = [
     "QWEN3_MODELS",
     "QWEN3_CODER_MODELS",
     "RL_SUPPORTED_MODELS",
     "SFT_SUPPORTED_MODELS",
+    "THINKING_MODELS",
     "EXPERIMENTAL_MODELS",
     "CORE_MODELS",
     "ExperimentalWarning",
@@ -373,5 +508,7 @@ __all__ = [
     "core_model_ids",
     "format_supported_models",
     "training_modes_for_model",
+    "supports_thinking",
+    "get_model_metadata",
 ]
 
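For callers, the practical upshot is a single place to ask whether a model emits <think> blocks before choosing prompt construction or output parsing. A hedged usage sketch (both helpers come from the diff above; the model id and printouts are only illustrative):

from synth_ai.api.models.supported import get_model_metadata, supports_thinking

model_id = "rl:Qwen/Qwen3-4B-Thinking-2507"  # prefixes such as 'rl:' are accepted
meta = get_model_metadata(model_id)
if meta is not None:
    print(meta.lifecycle, meta.training_modes, meta.supports_thinking)
if supports_thinking(model_id):
    # Only Thinking variants wrap reasoning in <think>...</think> tags.
    print("strip <think> blocks before scoring completions")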
synth_ai/api/train/__init__.py CHANGED

@@ -1,5 +1,17 @@
 from __future__ import annotations
 
-from
+from typing import Any
 
 __all__ = ["register", "train_command"]
+
+
+def register(cli: Any) -> None:
+    from synth_ai.cli.train import register as _register  # local import avoids circular dependency
+
+    _register(cli)
+
+
+def train_command(*args: Any, **kwargs: Any) -> Any:
+    from synth_ai.cli.train import train_command as _train_command  # local import avoids cycle
+
+    return _train_command(*args, **kwargs)
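The wrappers keep `synth_ai.api.train` importable without eagerly loading the Click-based CLI; the real implementation in `synth_ai.cli.train` is imported only when one of them is called. A hedged sketch of how a host CLI might use `register` (the `cli` group here is hypothetical; the wrapper simply forwards it to `synth_ai.cli.train.register`):

import click

from synth_ai.api.train import register


@click.group()
def cli() -> None:
    """Hypothetical host command group."""


# Importing synth_ai.cli.train happens inside register(), which is what breaks
# the circular dependency between the api and cli packages.
register(cli)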
synth_ai/api/train/cli.py CHANGED

@@ -2,6 +2,7 @@ from __future__ import annotations
 
 import importlib
 import os
+import time
 from collections.abc import Callable, Mapping
 from pathlib import Path
 from typing import Any, cast
@@ -36,20 +37,41 @@ from .utils import (
 )
 
 
-def _discover_dataset_candidates(
+def _discover_dataset_candidates(
+    config_path: Path, limit: int = 50, timeout: float = 10.0
+) -> list[Path]:
+    root = config_path.parent
+    parent = root.parent
+    cwd = Path.cwd()
+
     search_dirs: list[Path] = [
-
-
-
+        root,
+        root / "datasets",
+        parent,
+        parent / "datasets",
+        parent / "ft_data",
+        cwd,
+        cwd / "datasets",
+        cwd / "ft_data",
         REPO_ROOT / "datasets",
+        REPO_ROOT / "ft_data",
+        REPO_ROOT / "traces",
     ]
 
     candidates: list[Path] = []
     seen: set[Path] = set()
+    start = time.monotonic()
+    timed_out = False
     for directory in search_dirs:
+        if timed_out or time.monotonic() - start > timeout:
+            timed_out = True
+            break
         if not directory.exists() or not directory.is_dir():
             continue
         for path in directory.rglob("*.jsonl"):
+            if time.monotonic() - start > timeout:
+                timed_out = True
+                break
             try:
                 resolved = path.resolve()
             except OSError:
@@ -300,7 +322,7 @@ def train_command(
 def _wait_for_training_file(
     backend_base: str, api_key: str, file_id: str, *, timeout: float = 120.0
 ) -> None:
-    url = f"{backend_base}/
+    url = f"{backend_base.rstrip('/')}/files/{file_id}"
     headers = {"Authorization": f"Bearer {api_key}"}
     elapsed = 0.0
     interval = 2.0
@@ -524,7 +546,7 @@ def handle_sft(
     click.echo("Validating validation dataset…")
     validate_sft_jsonl(build.validation_file)
 
-    upload_url = f"{backend_base}/
+    upload_url = f"{backend_base.rstrip('/')}/files"
     click.echo("\n=== Uploading Training Data ===")
     click.echo(f"Dataset: {build.train_file}")
     click.echo(f"Destination: {upload_url}")
@@ -579,7 +601,8 @@
     try:
         _wait_for_training_file(backend_base, synth_key, train_file_id)
     except click.ClickException as exc:
-
+        click.echo(f"[WARN] File readiness check failed: {exc}")
+        click.echo("Proceeding anyway - backend will validate file during job creation...")
 
     click.echo("\n=== Creating Training Job ===")
     click.echo("Job payload preview:")

synth_ai/api/train/config_finder.py CHANGED

@@ -1,7 +1,6 @@
 from __future__ import annotations
 
 import json
-import os
 from collections.abc import Iterable
 from dataclasses import dataclass
 from pathlib import Path
@@ -11,7 +10,9 @@ import click
 from .utils import REPO_ROOT, load_toml, preview_json
 
 _SKIP_DIRS = {".git", "__pycache__", ".venv", "node_modules", "dist", "build"}
-
+
+_STATE_DIR = Path.home() / ".synth-ai"
+_STATE_FILE = _STATE_DIR / "train_cli.json"
 
 
 @dataclass(slots=True)
@@ -23,8 +24,8 @@ class ConfigCandidate:
 def _load_last_config() -> Path | None:
     """Load the last used training config path from state file."""
     try:
-        if
-        with open(
+        if _STATE_FILE.is_file():
+            with _STATE_FILE.open() as fh:
                 data = json.load(fh)
             if isinstance(data, dict):
                 last_config = data.get("LAST_CONFIG")
@@ -41,14 +42,14 @@ def _save_last_config(config_path: Path) -> None:
     """Save the last used training config path to state file."""
     try:
         data = {}
-        if
-        with open(
+        if _STATE_FILE.is_file():
+            with _STATE_FILE.open() as fh:
                 data = json.load(fh) or {}
         if not isinstance(data, dict):
             data = {}
         data["LAST_CONFIG"] = str(config_path.resolve())
-
-        with open(
+        _STATE_DIR.mkdir(parents=True, exist_ok=True)
+        with _STATE_FILE.open("w") as fh:
             json.dump(data, fh)
     except Exception:
         pass
@@ -77,6 +78,7 @@ def _iter_candidate_paths() -> Iterable[Path]:
         REPO_ROOT / "configs",
         REPO_ROOT / "examples",
         REPO_ROOT / "training",
+        REPO_ROOT / "synth_ai" / "demos",
     ]
     for base in preferred:
         if not base.exists():
@@ -148,6 +150,10 @@ def discover_configs(explicit: list[str], *, requested_type: str | None) -> list
             raise click.ClickException(f"Config not found: {path}")
         data = load_toml(path)
         cfg_type = _infer_config_type(data)
+        if cfg_type == "unknown":
+            raise click.ClickException(
+                f"Config {path} is missing algorithm.type/method metadata. Add type = 'rl' or 'sft'."
+            )
         candidates.append(ConfigCandidate(path=path, train_type=cfg_type))
         seen.add(path)
 
@@ -162,10 +168,12 @@ def discover_configs(explicit: list[str], *, requested_type: str | None) -> list
             except Exception:
                 continue
             cfg_type = _infer_config_type(data)
+            if cfg_type == "unknown":
+                continue
             candidates.append(ConfigCandidate(path=path, train_type=cfg_type))
 
     if requested_type and requested_type != "auto":
-        candidates = [c for c in candidates if c.train_type
+        candidates = [c for c in candidates if c.train_type == requested_type]
 
     # De-dupe by path and keep deterministic ordering by directory depth then name
     candidates.sort(key=lambda c: (len(c.path.parts), str(c.path)))
@@ -196,9 +204,8 @@ def prompt_for_config(
 
     click.echo("Select a training config:")
     for idx, cand in enumerate(candidates, start=1):
-        label = cand.train_type if cand.train_type != "unknown" else "?"
         last_marker = " (last used)" if last_config and cand.path.resolve() == last_config else ""
-        click.echo(f"  {idx})
+        click.echo(f"  {idx}) {cand.path}{last_marker}")
     click.echo("  0) Abort")
 
     choice = click.prompt("Enter choice", type=int, default=default_idx)

synth_ai/api/train/env_resolver.py CHANGED

@@ -8,6 +8,7 @@ from pathlib import Path
 from typing import Any, cast
 
 import click
+from synth_ai.utils.env import resolve_env_var
 
 from . import task_app
 from .utils import REPO_ROOT, mask_value, read_env_file, write_env_value
@@ -232,18 +233,16 @@ def _resolve_key(resolver: EnvResolver, spec: KeySpec) -> str:
         _maybe_persist(resolver, spec, env_val)
         os.environ[spec.name] = env_val
         return env_val
-    options: list[tuple[str, Callable[[], str | None]]] = []
 
-
-
-
-
-
-
-
-        return value
+    resolve_env_var(spec.name)
+    resolved_value = os.environ.get(spec.name)
+    if resolved_value:
+        click.echo(f"Found {spec.name} via secrets helper: {mask_value(resolved_value)}")
+        _maybe_persist(resolver, spec, resolved_value)
+        os.environ[spec.name] = resolved_value
+        return resolved_value
 
-    options
+    options: list[tuple[str, Callable[[], str | None]]] = []
 
     def _pick_env() -> str | None:
         resolver.select_new_env()
@@ -276,6 +275,10 @@ def _resolve_key(resolver: EnvResolver, spec: KeySpec) -> str:
 
 def _maybe_persist(resolver: EnvResolver, spec: KeySpec, value: str) -> None:
     # Automatically save (no prompt)
+    # Skip auto-persisting TASK_APP_URL to prevent overwriting CLI overrides
+    if spec.name == "TASK_APP_URL":
+        click.echo(f"Skipping auto-persist for {spec.name} (use CLI flags to override)")
+        return
     resolver.set_value(spec.name, value)
     click.echo(f"Saved {spec.name} to {resolver.current_path}")
 
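In `_resolve_key`, the bespoke option plumbing is replaced by a call to the shared `synth_ai.utils.env.resolve_env_var` helper followed by a re-read of the process environment. A standalone sketch of that flow (where the helper actually looks, such as .env files or saved user config, is up to `synth_ai.utils.env`; the key name is illustrative):

import os

from synth_ai.utils.env import resolve_env_var

key = "SYNTH_API_KEY"  # illustrative key name, not a guaranteed setting
resolve_env_var(key)   # may populate os.environ from known .env/config sources
value = os.environ.get(key)
if value:
    print(f"{key} resolved ({len(value)} chars)")
else:
    print(f"{key} still unset; fall back to interactive selection")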