synth-ai 0.2.9.dev4__py3-none-any.whl → 0.2.9.dev6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of synth-ai might be problematic. Click here for more details.
- examples/__init__.py +16 -0
- examples/crafter_debug_render.py +23 -17
- examples/qwen_coder/README.md +102 -0
- examples/qwen_coder/_shared.py +113 -0
- examples/qwen_coder/configs/coder_lora_30b.toml +61 -0
- examples/qwen_coder/configs/coder_lora_4b.toml +57 -0
- examples/qwen_coder/configs/coder_lora_small.toml +58 -0
- examples/qwen_coder/generate_dataset.py +98 -0
- examples/qwen_coder/infer_ft_smoke.py +64 -0
- examples/qwen_coder/infer_prod_proxy.py +73 -0
- examples/qwen_coder/infer_via_synth.py +87 -0
- examples/qwen_coder/scripts/infer_coder.sh +18 -0
- examples/qwen_coder/scripts/train_coder_30b.sh +21 -0
- examples/qwen_coder/sft_full_17b.py +103 -0
- examples/qwen_coder/sft_lora_30b.py +110 -0
- examples/qwen_coder/subset_jsonl.py +38 -0
- examples/qwen_coder/validate_jsonl.py +59 -0
- examples/rl/configs/eval_base_qwen.toml +1 -1
- examples/rl/configs/rl_from_base_qwen17.toml +1 -1
- examples/rl/download_dataset.py +26 -10
- examples/rl/run_eval.py +53 -52
- examples/rl/run_rl_and_save.py +29 -12
- examples/rl/task_app/math_single_step.py +180 -41
- examples/rl/task_app/math_task_app.py +14 -6
- examples/sft/README.md +139 -0
- examples/sft/configs/crafter_fft_qwen0p6b.toml +44 -0
- examples/sft/configs/crafter_lora_qwen0p6b.toml +45 -0
- examples/sft/evaluate.py +117 -0
- examples/sft/export_dataset.py +117 -0
- examples/sft/generate_traces.py +162 -0
- examples/swe/__init__.py +12 -0
- examples/swe/task_app/README.md +105 -0
- examples/swe/task_app/__init__.py +2 -0
- examples/swe/task_app/grpo_swe_mini.py +571 -0
- examples/swe/task_app/grpo_swe_mini_task_app.py +136 -0
- examples/swe/task_app/hosted/README.md +173 -0
- examples/swe/task_app/hosted/__init__.py +5 -0
- examples/swe/task_app/hosted/branching.py +143 -0
- examples/swe/task_app/hosted/environment_routes.py +1289 -0
- examples/swe/task_app/hosted/envs/__init__.py +1 -0
- examples/swe/task_app/hosted/envs/crafter/__init__.py +6 -0
- examples/swe/task_app/hosted/envs/crafter/app.py +1 -0
- examples/swe/task_app/hosted/envs/crafter/environment.py +522 -0
- examples/swe/task_app/hosted/envs/crafter/policy.py +478 -0
- examples/swe/task_app/hosted/envs/crafter/react_agent.py +108 -0
- examples/swe/task_app/hosted/envs/crafter/shared.py +305 -0
- examples/swe/task_app/hosted/envs/crafter/tools.py +47 -0
- examples/swe/task_app/hosted/envs/mini_swe/__init__.py +8 -0
- examples/swe/task_app/hosted/envs/mini_swe/environment.py +1164 -0
- examples/swe/task_app/hosted/envs/mini_swe/policy.py +355 -0
- examples/swe/task_app/hosted/envs/mini_swe/shared.py +83 -0
- examples/swe/task_app/hosted/envs/mini_swe/tools.py +96 -0
- examples/swe/task_app/hosted/hosted_app.py +204 -0
- examples/swe/task_app/hosted/inference/__init__.py +5 -0
- examples/swe/task_app/hosted/inference/openai_client.py +618 -0
- examples/swe/task_app/hosted/main.py +100 -0
- examples/swe/task_app/hosted/policy_routes.py +1079 -0
- examples/swe/task_app/hosted/registry.py +195 -0
- examples/swe/task_app/hosted/rollout.py +1869 -0
- examples/swe/task_app/hosted/storage/__init__.py +5 -0
- examples/swe/task_app/hosted/storage/volume.py +211 -0
- examples/swe/task_app/hosted/test_agents.py +161 -0
- examples/swe/task_app/hosted/test_service.py +137 -0
- examples/swe/task_app/hosted/utils.py +62 -0
- examples/vlm/README.md +68 -0
- examples/vlm/configs/crafter_vlm_gpt4o.toml +44 -0
- examples/vlm/crafter_image_only_agent.py +207 -0
- examples/vlm/crafter_openai_vlm_agent.py +277 -0
- examples/vlm/filter_image_rows.py +63 -0
- examples/vlm/run_crafter_vlm_benchmark.py +316 -0
- examples/warming_up_to_rl/analyze_trace_db.py +12 -10
- examples/warming_up_to_rl/configs/rl_from_base_qwen4b.toml +11 -1
- examples/warming_up_to_rl/export_trace_sft.py +218 -36
- examples/warming_up_to_rl/groq_test.py +15 -8
- examples/warming_up_to_rl/manage_secrets.py +29 -25
- examples/warming_up_to_rl/readme.md +9 -2
- examples/warming_up_to_rl/run_eval.py +137 -61
- examples/warming_up_to_rl/run_fft_and_save.py +131 -60
- examples/warming_up_to_rl/run_local_rollout.py +88 -39
- examples/warming_up_to_rl/run_local_rollout_modal.py +114 -28
- examples/warming_up_to_rl/run_local_rollout_parallel.py +81 -20
- examples/warming_up_to_rl/run_local_rollout_traced.py +126 -23
- examples/warming_up_to_rl/run_rl_and_save.py +35 -12
- examples/warming_up_to_rl/run_rollout_remote.py +44 -19
- examples/warming_up_to_rl/task_app/README.md +6 -2
- examples/warming_up_to_rl/task_app/grpo_crafter.py +319 -57
- examples/warming_up_to_rl/task_app/grpo_crafter_task_app.py +11 -30
- examples/warming_up_to_rl/task_app/synth_envs_hosted/__init__.py +1 -1
- examples/warming_up_to_rl/task_app/synth_envs_hosted/branching.py +9 -11
- examples/warming_up_to_rl/task_app/synth_envs_hosted/environment_routes.py +137 -182
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/__init__.py +1 -1
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/__init__.py +1 -1
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/app.py +1 -1
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/environment.py +150 -57
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/policy.py +105 -69
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/react_agent.py +19 -7
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/shared.py +45 -42
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/tools.py +1 -1
- examples/warming_up_to_rl/task_app/synth_envs_hosted/hosted_app.py +47 -45
- examples/warming_up_to_rl/task_app/synth_envs_hosted/inference/__init__.py +1 -1
- examples/warming_up_to_rl/task_app/synth_envs_hosted/inference/openai_client.py +198 -92
- examples/warming_up_to_rl/task_app/synth_envs_hosted/main.py +0 -2
- examples/warming_up_to_rl/task_app/synth_envs_hosted/policy_routes.py +361 -263
- examples/warming_up_to_rl/task_app/synth_envs_hosted/registry.py +21 -23
- examples/warming_up_to_rl/task_app/synth_envs_hosted/rollout.py +394 -274
- examples/warming_up_to_rl/task_app/synth_envs_hosted/storage/__init__.py +1 -1
- examples/warming_up_to_rl/task_app/synth_envs_hosted/storage/volume.py +56 -62
- examples/warming_up_to_rl/task_app/synth_envs_hosted/test_agents.py +1 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/test_service.py +6 -15
- examples/warming_up_to_rl/task_app/synth_envs_hosted/utils.py +4 -3
- synth/__init__.py +14 -0
- synth_ai/__init__.py +20 -4
- synth_ai/api/models/supported.py +376 -0
- synth_ai/api/train/builders.py +157 -26
- synth_ai/api/train/cli.py +213 -57
- synth_ai/api/train/config_finder.py +65 -5
- synth_ai/api/train/env_resolver.py +33 -15
- synth_ai/api/train/pollers.py +13 -4
- synth_ai/api/train/supported_algos.py +139 -0
- synth_ai/api/train/task_app.py +5 -3
- synth_ai/api/train/utils.py +33 -48
- synth_ai/cli/__init__.py +19 -4
- synth_ai/cli/_modal_wrapper.py +28 -0
- synth_ai/cli/_typer_patch.py +49 -0
- synth_ai/cli/balance.py +2 -3
- synth_ai/cli/calc.py +1 -1
- synth_ai/cli/demo.py +21 -6
- synth_ai/cli/recent.py +2 -2
- synth_ai/cli/rl_demo.py +77 -17
- synth_ai/cli/root.py +116 -39
- synth_ai/cli/status.py +2 -2
- synth_ai/cli/task_apps.py +1709 -243
- synth_ai/cli/traces.py +7 -4
- synth_ai/cli/turso.py +73 -0
- synth_ai/cli/watch.py +12 -18
- synth_ai/core/experiment.py +0 -2
- synth_ai/demo_registry.py +68 -31
- synth_ai/demos/core/cli.py +516 -194
- synth_ai/demos/demo_task_apps/__init__.py +3 -3
- synth_ai/demos/demo_task_apps/core.py +64 -28
- synth_ai/demos/demo_task_apps/crafter/configs/crafter_fft_4b.toml +2 -3
- synth_ai/demos/demo_task_apps/crafter/grpo_crafter_task_app.py +37 -30
- synth_ai/demos/demo_task_apps/math/_common.py +1 -2
- synth_ai/demos/demo_task_apps/math/app.py +2 -1
- synth_ai/demos/demo_task_apps/math/deploy_modal.py +3 -6
- synth_ai/demos/demo_task_apps/math/modal_task_app.py +183 -82
- synth_ai/demos/demo_task_apps/math/task_app_entry.py +0 -2
- synth_ai/environments/examples/bandit/engine.py +12 -4
- synth_ai/environments/examples/bandit/taskset.py +4 -4
- synth_ai/environments/examples/crafter_classic/environment.py +76 -1
- synth_ai/environments/reproducibility/tree.py +5 -6
- synth_ai/environments/service/app.py +11 -12
- synth_ai/environments/service/core_routes.py +10 -9
- synth_ai/environments/stateful/engine.py +1 -1
- synth_ai/environments/tasks/core.py +1 -0
- synth_ai/environments/tasks/filters.py +5 -6
- synth_ai/environments/tasks/utils.py +4 -5
- synth_ai/evals/base.py +0 -2
- synth_ai/handshake.py +11 -9
- synth_ai/http.py +1 -1
- synth_ai/http_client.py +43 -11
- synth_ai/inference/__init__.py +0 -2
- synth_ai/inference/client.py +20 -6
- synth_ai/jobs/client.py +103 -78
- synth_ai/learning/__init__.py +41 -6
- synth_ai/learning/algorithms.py +14 -0
- synth_ai/learning/client.py +121 -29
- synth_ai/learning/config.py +2 -40
- synth_ai/learning/constants.py +0 -2
- synth_ai/learning/ft_client.py +4 -56
- synth_ai/learning/health.py +13 -7
- synth_ai/learning/jobs.py +43 -47
- synth_ai/{rl → learning/rl}/__init__.py +14 -5
- synth_ai/learning/rl/client.py +267 -0
- synth_ai/learning/rl/config.py +31 -0
- synth_ai/{rl → learning/rl}/contracts.py +5 -10
- synth_ai/{rl → learning/rl}/env_keys.py +45 -16
- synth_ai/learning/rl/secrets.py +13 -0
- synth_ai/learning/rl_client.py +2 -253
- synth_ai/learning/sft/__init__.py +29 -0
- synth_ai/learning/sft/client.py +68 -0
- synth_ai/learning/sft/config.py +270 -0
- synth_ai/learning/sft/data.py +295 -0
- synth_ai/learning/sse.py +25 -26
- synth_ai/learning/validators.py +25 -24
- synth_ai/lm/__init__.py +21 -47
- synth_ai/task/__init__.py +26 -27
- synth_ai/task/apps/__init__.py +18 -19
- synth_ai/task/auth.py +35 -23
- synth_ai/task/client.py +15 -13
- synth_ai/task/contracts.py +37 -35
- synth_ai/task/datasets.py +9 -6
- synth_ai/task/errors.py +11 -10
- synth_ai/task/health.py +17 -11
- synth_ai/task/json.py +58 -24
- synth_ai/task/proxy.py +15 -14
- synth_ai/task/rubrics.py +22 -15
- synth_ai/task/server.py +43 -17
- synth_ai/task/tracing_utils.py +12 -7
- synth_ai/task/validators.py +0 -1
- synth_ai/task/vendors.py +5 -7
- synth_ai/tracing_v3/__init__.py +2 -0
- synth_ai/tracing_v3/abstractions.py +21 -4
- synth_ai/tracing_v3/db_config.py +26 -1
- synth_ai/tracing_v3/decorators.py +18 -15
- synth_ai/tracing_v3/examples/basic_usage.py +3 -2
- synth_ai/tracing_v3/hooks.py +6 -4
- synth_ai/tracing_v3/llm_call_record_helpers.py +6 -6
- synth_ai/tracing_v3/replica_sync.py +1 -0
- synth_ai/tracing_v3/session_tracer.py +63 -16
- synth_ai/tracing_v3/storage/base.py +89 -1
- synth_ai/tracing_v3/storage/config.py +21 -8
- synth_ai/tracing_v3/storage/factory.py +10 -8
- synth_ai/tracing_v3/storage/utils.py +4 -2
- synth_ai/tracing_v3/turso/daemon.py +7 -2
- synth_ai/tracing_v3/turso/models.py +5 -2
- synth_ai/tracing_v3/turso/native_manager.py +1173 -0
- synth_ai/tracing_v3/utils.py +4 -3
- synth_ai/v0/api/__init__.py +8 -0
- synth_ai/v0/api/models/__init__.py +8 -0
- synth_ai/v0/api/models/supported.py +8 -0
- synth_ai/v0/config/__init__.py +15 -0
- synth_ai/v0/config/base_url.py +12 -0
- synth_ai/v0/lm/__init__.py +51 -0
- synth_ai/{lm → v0/lm}/caching/ephemeral.py +3 -5
- synth_ai/{lm → v0/lm}/caching/handler.py +4 -4
- synth_ai/{lm → v0/lm}/caching/initialize.py +1 -1
- synth_ai/{lm → v0/lm}/caching/persistent.py +1 -1
- synth_ai/{lm → v0/lm}/config.py +6 -1
- synth_ai/{lm → v0/lm}/core/all.py +9 -9
- synth_ai/{lm → v0/lm}/core/exceptions.py +0 -2
- synth_ai/{lm → v0/lm}/core/main.py +19 -7
- synth_ai/{lm → v0/lm}/core/main_v3.py +10 -10
- synth_ai/{lm → v0/lm}/core/synth_models.py +2 -15
- synth_ai/{lm → v0/lm}/core/vendor_clients.py +6 -4
- synth_ai/{lm → v0/lm}/overrides.py +4 -4
- synth_ai/{lm → v0/lm}/provider_support/anthropic.py +4 -4
- synth_ai/{lm → v0/lm}/provider_support/openai.py +5 -5
- synth_ai/{lm → v0/lm}/structured_outputs/handler.py +5 -5
- synth_ai/{lm → v0/lm}/structured_outputs/rehabilitate.py +1 -1
- synth_ai/{lm → v0/lm}/vendors/core/anthropic_api.py +16 -16
- synth_ai/{lm → v0/lm}/vendors/core/gemini_api.py +5 -5
- synth_ai/{lm → v0/lm}/vendors/core/mistral_api.py +5 -5
- synth_ai/{lm → v0/lm}/vendors/core/openai_api.py +12 -10
- synth_ai/{lm → v0/lm}/vendors/openai_standard.py +11 -9
- synth_ai/{lm → v0/lm}/vendors/openai_standard_responses.py +8 -5
- synth_ai/{lm → v0/lm}/vendors/supported/custom_endpoint.py +4 -6
- synth_ai/{lm → v0/lm}/vendors/supported/deepseek.py +2 -2
- synth_ai/{lm → v0/lm}/vendors/supported/grok.py +2 -2
- synth_ai/{lm → v0/lm}/vendors/supported/groq.py +1 -1
- synth_ai/{lm → v0/lm}/vendors/supported/ollama.py +1 -1
- synth_ai/{lm → v0/lm}/vendors/supported/openrouter.py +3 -3
- synth_ai/{lm → v0/lm}/vendors/supported/together.py +1 -1
- synth_ai/{lm → v0/lm}/vendors/synth_client.py +38 -11
- synth_ai/v0/tracing/upload.py +32 -135
- synth_ai/v0/tracing_v3/__init__.py +10 -0
- synth_ai/v0/tracing_v3/abstractions.py +3 -0
- synth_ai/v0/tracing_v3/decorators.py +3 -0
- synth_ai/v0/tracing_v3/llm_call_record_helpers.py +3 -0
- synth_ai/v0/tracing_v3/session_tracer.py +3 -0
- synth_ai-0.2.9.dev6.dist-info/METADATA +191 -0
- {synth_ai-0.2.9.dev4.dist-info → synth_ai-0.2.9.dev6.dist-info}/RECORD +291 -264
- {synth_ai-0.2.9.dev4.dist-info → synth_ai-0.2.9.dev6.dist-info}/top_level.txt +1 -0
- examples/common_old/backend.py +0 -21
- examples/evals_old/README.md +0 -98
- examples/evals_old/__init__.py +0 -6
- examples/evals_old/compare_models.py +0 -1037
- examples/evals_old/example_log.md +0 -145
- examples/evals_old/run_demo.sh +0 -126
- examples/evals_old/trace_analysis.py +0 -270
- examples/finetuning_old/_backup_synth_qwen/config.toml +0 -29
- examples/finetuning_old/_backup_synth_qwen/example_log.md +0 -324
- examples/finetuning_old/_backup_synth_qwen/filter_traces.py +0 -60
- examples/finetuning_old/_backup_synth_qwen/filter_traces_achievements.py +0 -239
- examples/finetuning_old/_backup_synth_qwen/purge_v3_traces.py +0 -109
- examples/finetuning_old/_backup_synth_qwen/react_agent_lm.py +0 -1924
- examples/finetuning_old/_backup_synth_qwen/readme.md +0 -49
- examples/finetuning_old/_backup_synth_qwen/run_crafter_qwen4b.py +0 -114
- examples/finetuning_old/_backup_synth_qwen/run_demo.sh +0 -195
- examples/finetuning_old/_backup_synth_qwen/sft_kickoff.py +0 -118
- examples/finetuning_old/synth_qwen_v1/README.md +0 -68
- examples/finetuning_old/synth_qwen_v1/filter_traces.py +0 -60
- examples/finetuning_old/synth_qwen_v1/filter_traces_achievements.py +0 -239
- examples/finetuning_old/synth_qwen_v1/finetune.py +0 -46
- examples/finetuning_old/synth_qwen_v1/hello_ft_model.py +0 -71
- examples/finetuning_old/synth_qwen_v1/infer.py +0 -37
- examples/finetuning_old/synth_qwen_v1/poll.py +0 -44
- examples/finetuning_old/synth_qwen_v1/prepare_data.py +0 -35
- examples/finetuning_old/synth_qwen_v1/purge_v3_traces.py +0 -109
- examples/finetuning_old/synth_qwen_v1/react_agent_lm.py +0 -1932
- examples/finetuning_old/synth_qwen_v1/run_crafter_sft_job.py +0 -207
- examples/finetuning_old/synth_qwen_v1/run_ft_job.py +0 -232
- examples/finetuning_old/synth_qwen_v1/upload_data.py +0 -34
- examples/finetuning_old/synth_qwen_v1/util.py +0 -147
- examples/rl_old/task_app.py +0 -962
- examples/warming_up_to_rl/old/event_rewards.md +0 -234
- examples/warming_up_to_rl/old/notes.md +0 -73
- examples/warming_up_to_rl/task_app/synth_envs_hosted/test_stepwise_rewards.py +0 -58
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/filter_traces_sft_turso.py +0 -738
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/filter_traces_sft_turso.py +0 -580
- synth_ai/environments/examples/sokoban/units/astar_common.py +0 -95
- synth_ai/experimental/synth_oss.py +0 -446
- synth_ai/install_sqld.sh +0 -40
- synth_ai/learning/filtering.py +0 -0
- synth_ai/learning/offline/dpo.py +0 -0
- synth_ai/learning/offline/providers.py +0 -7
- synth_ai/learning/offline/sft.py +0 -0
- synth_ai/learning/offline/shared.py +0 -0
- synth_ai/learning/online/grpo.py +0 -0
- synth_ai/learning/online/irft.py +0 -0
- synth_ai/learning/prompts/banking77_injection_eval.py +0 -168
- synth_ai/learning/prompts/gepa.py +0 -0
- synth_ai/learning/prompts/hello_world_in_context_injection_ex.py +0 -213
- synth_ai/learning/prompts/mipro.py +0 -289
- synth_ai/learning/prompts/random_search.py +0 -246
- synth_ai/learning/prompts/run_mipro_banking77.py +0 -172
- synth_ai/learning/prompts/run_random_search_banking77.py +0 -324
- synth_ai/rl/secrets.py +0 -19
- synth_ai/scripts/verify_rewards.py +0 -100
- synth_ai/tracing/__init__.py +0 -30
- synth_ai/tracing_v1/__init__.py +0 -33
- synth_ai/tracing_v3/turso/__init__.py +0 -25
- synth_ai/tracing_v3/turso/manager.py +0 -774
- synth_ai/zyk/__init__.py +0 -30
- synth_ai-0.2.9.dev4.dist-info/METADATA +0 -131
- /synth_ai/{lm → v0/lm}/caching/__init__.py +0 -0
- /synth_ai/{lm → v0/lm}/caching/constants.py +0 -0
- /synth_ai/{lm → v0/lm}/caching/dbs.py +0 -0
- /synth_ai/{lm → v0/lm}/constants.py +0 -0
- /synth_ai/{lm → v0/lm}/core/__init__.py +0 -0
- /synth_ai/{lm → v0/lm}/cost/__init__.py +0 -0
- /synth_ai/{lm → v0/lm}/cost/monitor.py +0 -0
- /synth_ai/{lm → v0/lm}/cost/statefulness.py +0 -0
- /synth_ai/{lm → v0/lm}/injection.py +0 -0
- /synth_ai/{lm → v0/lm}/provider_support/__init__.py +0 -0
- /synth_ai/{lm → v0/lm}/provider_support/suppress_logging.py +0 -0
- /synth_ai/{lm → v0/lm}/structured_outputs/__init__.py +0 -0
- /synth_ai/{lm → v0/lm}/structured_outputs/inject.py +0 -0
- /synth_ai/{lm → v0/lm}/tools/__init__.py +0 -0
- /synth_ai/{lm → v0/lm}/tools/base.py +0 -0
- /synth_ai/{lm → v0/lm}/unified_interface.py +0 -0
- /synth_ai/{lm → v0/lm}/vendors/__init__.py +0 -0
- /synth_ai/{lm → v0/lm}/vendors/base.py +0 -0
- /synth_ai/{lm → v0/lm}/vendors/core/__init__.py +0 -0
- /synth_ai/{lm → v0/lm}/vendors/core/synth_dev_api.py +0 -0
- /synth_ai/{lm → v0/lm}/vendors/local/__init__.py +0 -0
- /synth_ai/{lm → v0/lm}/vendors/local/ollama.py +0 -0
- /synth_ai/{lm → v0/lm}/vendors/retries.py +0 -0
- /synth_ai/{lm → v0/lm}/vendors/supported/__init__.py +0 -0
- /synth_ai/{lm → v0/lm}/warmup.py +0 -0
- {synth_ai-0.2.9.dev4.dist-info → synth_ai-0.2.9.dev6.dist-info}/WHEEL +0 -0
- {synth_ai-0.2.9.dev4.dist-info → synth_ai-0.2.9.dev6.dist-info}/entry_points.txt +0 -0
- {synth_ai-0.2.9.dev4.dist-info → synth_ai-0.2.9.dev6.dist-info}/licenses/LICENSE +0 -0
|
@@ -1,207 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env python3
|
|
2
|
-
"""
|
|
3
|
-
Crafter → SFT end-to-end runner (single script).
|
|
4
|
-
|
|
5
|
-
Pipeline:
|
|
6
|
-
1) Read v3 traces DB (sqld/Turso) and filter sessions (achievements >= min)
|
|
7
|
-
2) Export OpenAI-format JSONL
|
|
8
|
-
3) Upload file, create/start SFT job, poll to terminal
|
|
9
|
-
4) (Optional) quick inference with the resulting model
|
|
10
|
-
|
|
11
|
-
Usage:
|
|
12
|
-
uv run python examples/finetuning/synth_qwen_v1/run_crafter_sft_job.py --mode dev \
|
|
13
|
-
--db /Users/joshpurtell/Documents/GitHub/synth-ai/traces/v3/synth_ai.db/dbs/default/data \
|
|
14
|
-
--min-achievements 2 --output examples/finetuning/synth_qwen_v1/data/training_crafter.jsonl
|
|
15
|
-
"""
|
|
16
|
-
|
|
17
|
-
from __future__ import annotations
|
|
18
|
-
|
|
19
|
-
import argparse
|
|
20
|
-
import asyncio
|
|
21
|
-
import json
|
|
22
|
-
import os
|
|
23
|
-
import sys
|
|
24
|
-
from pathlib import Path
|
|
25
|
-
from typing import Any
|
|
26
|
-
|
|
27
|
-
# Repo root on sys.path for local runs
|
|
28
|
-
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
|
|
29
|
-
|
|
30
|
-
from synth_ai.learning import FtClient, JobHandle, validate_training_jsonl # type: ignore
|
|
31
|
-
from synth_ai.inference import InferenceClient # type: ignore
|
|
32
|
-
from examples.finetuning.synth_qwen_v1.util import load_env, save_state # type: ignore
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
def parse_args() -> argparse.Namespace:
    """Parse command-line options for the Crafter traces → SFT pipeline.

    Returns:
        argparse.Namespace with the traces DB location (``--db``), JSONL
        output path (``--output``), session filters (``--min-achievements``,
        ``--max-cost``, ``--max-tokens``, ``--models``), and fine-tuning job
        settings (``--model``, ``--epochs``, ``--batch-size``, ``--no-infer``).
    """
    p = argparse.ArgumentParser(description="Crafter traces → SFT JSONL → FT job runner")
    p.add_argument("--mode", choices=["local", "dev", "prod"], default=None)
    # The default --db path assumes this file lives three directories below the
    # repo root.  parents[3] raises IndexError when the file sits fewer than
    # four levels below the filesystem root (e.g. a relocated copy), so fall
    # back to the top-most available ancestor instead of crashing at parse time.
    script = Path(__file__).resolve()
    try:
        repo_root = script.parents[3]
    except IndexError:
        repo_root = script.parents[len(script.parents) - 1]
    p.add_argument(
        "--db",
        default=str(repo_root / "traces/v3/synth_ai.db/dbs/default/data"),
        help="Path to sqld internal data file or sqlite+aiosqlite URL",
    )
    p.add_argument("--output", default=str(Path(__file__).parent / "data" / "training_crafter.jsonl"))
    p.add_argument("--min-achievements", type=int, default=2)
    p.add_argument("--max-cost", type=float, default=10.0)
    p.add_argument("--max-tokens", type=int, default=100000)
    p.add_argument("--model", default="Qwen/Qwen3-0.6B")
    p.add_argument("--epochs", type=int, default=1)
    p.add_argument("--batch-size", type=int, default=4)
    p.add_argument("--no-infer", action="store_true")
    p.add_argument("--models", nargs="*", help="Optional model name filter (any match)")
    return p.parse_args()
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
def _normalize_db_url(raw: str) -> str:
|
|
59
|
-
if raw.endswith(".db") and not raw.startswith("sqlite"):
|
|
60
|
-
return f"sqlite+aiosqlite:///{raw}"
|
|
61
|
-
if raw.startswith("sqlite+aiosqlite:///"):
|
|
62
|
-
return raw
|
|
63
|
-
if raw.startswith("sqlite:///") and raw.endswith(".db"):
|
|
64
|
-
return raw.replace("sqlite:///", "sqlite+aiosqlite:///")
|
|
65
|
-
return raw
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
async def extract_jsonl_from_traces(db_url: str, output_path: str, cfg: dict[str, Any]) -> int:
    """Filter v3 trace sessions and export them as OpenAI-format JSONL.

    Args:
        db_url: sqlite+aiosqlite URL (or raw path) of the v3 traces database.
        output_path: Destination JSONL file; parent directories are created.
        cfg: Config dict; only ``cfg["filters"]`` is read here
            (``min_achievements``, ``max_cost``, ``max_tokens``, ``models``).

    Returns:
        Number of training examples written to ``output_path``.

    Raises:
        ImportError: When no dist variant provides ``FinetuningDataExtractorV3``.
    """
    # Import extractor with robust fallbacks across dist variants
    Extractor = None
    try:
        from synth_ai.environments.examples.crafter_classic.agent_demos.crafter_modal_ft.filter_traces_sft_turso import (  # type: ignore
            FinetuningDataExtractorV3 as _Ex,
        )
        Extractor = _Ex
    except Exception:
        try:
            from synth_ai.environments.examples.crafter_classic.agent_demos.crafter_openai_ft.filter_traces_sft_turso import (  # type: ignore
                FinetuningDataExtractorV3 as _Ex,
            )
            Extractor = _Ex
        except Exception as e:
            raise ImportError("FinetuningDataExtractorV3 not available in current build") from e

    # Pull filter thresholds out of cfg, falling back to the CLI defaults.
    filters: dict[str, Any] = cfg.get("filters", {})
    min_ach = int(filters.get("min_achievements", 2))
    max_cost = float(filters.get("max_cost", 10.0))
    max_tokens = int(filters.get("max_tokens", 100000))
    models: list[str] = list(filters.get("models", []) or [])

    kept: list[str] = []
    async with Extractor(db_url) as ex:
        # sessions appears to be a pandas DataFrame (iterrows below) — confirm
        # against the extractor's API.
        sessions = await ex.get_all_sessions()
        for _, row in sessions.iterrows():
            sid = row["session_id"]
            metrics = await ex.get_session_metrics(sid)
            # Drop sessions that exceed the cost or token budget.
            if float(metrics.get("total_cost", 0.0)) > max_cost:
                continue
            # total_tokens may be None; "or 0" guards the int() conversion.
            if int(metrics.get("total_tokens", 0) or 0) > max_tokens:
                continue
            # Optional model filter
            if models:
                # NOTE(review): event_type 'cais' presumably marks LLM call
                # events — verify against the tracing schema.
                model_df = await ex.db_manager.query_traces(
                    """
                    SELECT DISTINCT model_name
                    FROM events
                    WHERE session_id = :session_id
                    AND event_type = 'cais'
                    AND model_name IS NOT NULL
                    """,
                    {"session_id": sid},
                )
                session_models = model_df["model_name"].tolist() if model_df is not None and not model_df.empty else []
                # Keep the session when ANY requested model appears in it.
                if not any(m in session_models for m in models):
                    continue
            # Count only truthy achievements toward the minimum threshold.
            ach = await ex.get_session_achievements(sid) or []
            if len([a for a in ach if a]) >= min_ach:
                kept.append(sid)

        data = await ex.extract_openai_format(kept)
    Path(output_path).parent.mkdir(parents=True, exist_ok=True)
    # One JSON object per line (OpenAI fine-tuning JSONL convention).
    with open(output_path, "w") as f:
        for exm in data:
            f.write(json.dumps(exm) + "\n")
    return len(data)
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
async def run(args: argparse.Namespace) -> None:
    """Drive the full pipeline: export traces → validate → SFT job → poll → infer.

    Args:
        args: Parsed CLI namespace from :func:`parse_args`.

    Raises:
        RuntimeError: When job creation does not return a ``job_id``.
    """
    # Resolve backend URL and API key for the selected mode (local/dev/prod).
    base_url, api_key = load_env(args.mode)

    # 1) Filter and export JSONL from v3 traces
    db_url = _normalize_db_url(args.db)
    cfg = {
        "mode": "trajectory",
        "filters": {
            "min_achievements": int(args.min_achievements),
            "max_cost": float(args.max_cost),
            "max_tokens": int(args.max_tokens),
            "models": args.models or [],
        },
    }
    out_path = str(Path(args.output))
    print("Extracting SFT data from traces…")
    n = await extract_jsonl_from_traces(db_url, out_path, cfg)
    print(f"✅ Wrote {n} examples → {out_path}")

    # 2) Validate JSONL
    validate_training_jsonl(out_path)

    # 3) Upload and create FT job
    client = FtClient(base_url=base_url, api_key=api_key)
    file_id = await client.upload_training_file(Path(out_path), purpose="fine-tune")
    print(f"file_id={file_id}")
    # save_state persists ids so interrupted runs can be resumed/inspected.
    save_state({"file_id": file_id})

    create = await client.create_sft_job(
        model=str(args.model),
        training_file_id=file_id,
        hyperparameters={"n_epochs": int(args.epochs), "batch_size": int(args.batch_size)},
        metadata={"upload_to_wasabi": True},
    )
    job_id = (create or {}).get("job_id")
    if not job_id:
        raise RuntimeError(f"create_sft_job missing job_id: {create}")
    print(f"job_id={job_id}")
    save_state({"job_id": job_id})

    start = await client.start_job(job_id)
    print(f"start={start}")

    # 4) Poll to terminal
    handle = JobHandle(base_url, api_key, job_id, strict=True)
    # Poll every 2s, give up after 30 minutes.
    final = await handle.poll_until_terminal(interval_seconds=2.0, max_seconds=1800)
    status = (final or {}).get("status")
    print(f"final_status={status}")
    ft_model = (final or {}).get("fine_tuned_model")
    if ft_model:
        save_state({"fine_tuned_model": ft_model})
        print(f"fine_tuned_model={ft_model}")

    # 5) Optional inference check
    if not args.no_infer:
        # Best-effort sanity check: inference failures are reported, not fatal.
        try:
            ic = InferenceClient(base_url=base_url, api_key=api_key)
            model_for_infer = ft_model or str(args.model)
            print(f"\nInference sanity check (model={model_for_infer})…")
            resp = await ic.create_chat_completion(
                model=model_for_infer,
                messages=[{"role": "user", "content": "Give me a cheerful two-line greeting."}],
                max_tokens=128,
                temperature=0.7,
                stream=False,
            )
            print(resp)
        except Exception as e:
            print(f"(inference skipped due to error: {e})")
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
def main() -> None:
    """Synchronous CLI entry point: parse arguments and run the async pipeline."""
    asyncio.run(run(parse_args()))
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
if __name__ == "__main__":
|
|
205
|
-
main()
|
|
206
|
-
|
|
207
|
-
|
|
@@ -1,232 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env python3
|
|
2
|
-
"""
|
|
3
|
-
End-to-end SFT workflow for Qwen (single script).
|
|
4
|
-
|
|
5
|
-
Steps performed:
|
|
6
|
-
1) Ensure/validate training JSONL (creates a minimal one if missing)
|
|
7
|
-
2) Upload training file → save file_id to state.json
|
|
8
|
-
3) Create SFT job (Qwen/Qwen3-0.6B by default) and start → save job_id
|
|
9
|
-
4) Poll until terminal → save fine_tuned_model when available
|
|
10
|
-
5) (Optional) Quick inference with the fine-tuned model (or base if absent)
|
|
11
|
-
|
|
12
|
-
Usage:
|
|
13
|
-
uv run python examples/finetuning/synth_qwen_v1/run_ft_job.py --mode dev
|
|
14
|
-
|
|
15
|
-
Options:
|
|
16
|
-
--mode {local,dev,prod} Backend mode/environment (default: env override or prod)
|
|
17
|
-
--data PATH Path to training JSONL (default: ./data/training.jsonl)
|
|
18
|
-
--model NAME Base model for SFT (default: Qwen/Qwen3-0.6B)
|
|
19
|
-
--epochs N Epochs (default: 1)
|
|
20
|
-
--batch-size N Batch size (default: 4)
|
|
21
|
-
--no-infer Skip the post-training inference check
|
|
22
|
-
"""
|
|
23
|
-
|
|
24
|
-
from __future__ import annotations
|
|
25
|
-
|
|
26
|
-
import argparse
|
|
27
|
-
import asyncio
|
|
28
|
-
import json
|
|
29
|
-
import os
|
|
30
|
-
import sys
|
|
31
|
-
from pathlib import Path
|
|
32
|
-
from typing import Any
|
|
33
|
-
|
|
34
|
-
# Make repo root importable when running directly
|
|
35
|
-
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
|
|
36
|
-
|
|
37
|
-
from synth_ai.config.base_url import get_backend_from_env
|
|
38
|
-
from synth_ai.learning import FtClient, JobHandle, validate_training_jsonl # type: ignore
|
|
39
|
-
from synth_ai.inference import InferenceClient # type: ignore
|
|
40
|
-
from examples.finetuning.synth_qwen_v1.util import load_env, load_state, save_state # type: ignore
|
|
41
|
-
|
|
42
|
-
# Prefer the shared example helper for resolving the backend URL; fall back to
# a local implementation when examples.common is not importable (e.g. when this
# script is executed outside the repo checkout).
try:
    from examples.common.backend import resolve_backend_url as _resolve_backend_default  # type: ignore
except Exception:  # pragma: no cover - fallback for direct execution

    def _resolve_backend_default() -> str:
        """Resolve the backend base URL from the environment, ensuring an ``/api`` suffix."""
        base, _ = get_backend_from_env()
        base = base.rstrip("/")
        return base if base.endswith("/api") else f"{base}/api"
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
def parse_args() -> argparse.Namespace:
    """Parse command-line options for the end-to-end Qwen SFT workflow.

    Returns:
        argparse.Namespace with backend mode, training-data path, base model,
        hyperparameters, and the ``--no-infer`` flag.
    """
    parser = argparse.ArgumentParser(description="Run Qwen SFT end-to-end")
    default_data = Path(__file__).parent / "data" / "training_crafter.jsonl"
    parser.add_argument("--mode", choices=["prod", "dev", "local"], default=None)
    parser.add_argument("--data", default=str(default_data))
    parser.add_argument("--model", default="Qwen/Qwen3-0.6B")
    parser.add_argument("--epochs", type=int, default=1)
    parser.add_argument("--batch-size", type=int, default=4, dest="batch_size")
    parser.add_argument("--no-infer", action="store_true")
    return parser.parse_args()
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
def ensure_training_jsonl(path: Path) -> Path:
    """Guarantee a validated training JSONL file exists at *path* and return it.

    Creates the parent directory, seeds a minimal single-example dataset when
    the file is absent, then runs the shared SDK validator over the file.
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    if not path.exists():
        # Seed a minimal one-example dataset so the pipeline can run end to end.
        example = {
            "messages": [
                {"role": "user", "content": "Write a short greeting."},
                {"role": "assistant", "content": "Hello there!"},
            ]
        }
        path.write_text(json.dumps(example) + "\n")
    # Validate using the shared SDK validator before anything is uploaded.
    validate_training_jsonl(path)
    return path
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
async def run(args: argparse.Namespace) -> None:
    """Run the full SFT flow: upload data, create and start the job, poll to a
    terminal state, and optionally smoke-test inference on the resulting model.

    Args:
        args: parsed CLI namespace (mode, data, model, epochs, batch_size, no_infer).

    Raises:
        RuntimeError: if the upload returns no file id or job creation returns
            no job id.
    """
    # Resolve backend and key
    base_url, api_key = load_env(args.mode)
    # Force canonical prod base when prod mode (or override) is selected
    try:
        if (args.mode == "prod") or (os.getenv("SYNTH_BACKEND_URL_OVERRIDE", "").strip().lower() == "prod"):
            base_url = _resolve_backend_default()
            # Also export for any downstream helpers that read env
            os.environ["PROD_BACKEND_URL"] = base_url
    except Exception:
        pass

    # Ensure/validate training JSONL
    data_path = ensure_training_jsonl(Path(args.data))
    print(f"Training JSONL: {data_path}")

    # Upload file
    ft = FtClient(base_url=base_url, api_key=api_key)
    file_id = await ft.upload_training_file(data_path, purpose="fine-tune")
    if not file_id:
        raise RuntimeError("upload_training_file returned empty file_id")
    print(f"file_id={file_id}")
    save_state({"file_id": file_id})

    # Create job
    hyperparameters: dict[str, Any] = {
        "n_epochs": int(args.epochs),
        "batch_size": int(args.batch_size),
    }
    # Include explicit compute topology for billing/inference resolution.
    # Default: 1x A10G (can be surfaced via CLI later if needed).
    metadata = {
        "upload_to_wasabi": True,
        # Normalized effective config consumed by the backend SFT workflow
        "effective_config": {
            "compute": {
                "gpu_type": "A10G",
                "gpu_count": 1,
                "nodes": 1,
            },
            "data": {
                "topology": {
                    "gpu_type": "A10G",
                    "container_count": 1,
                }
            },
        },
    }

    create_resp = await ft.create_sft_job(
        model=str(args.model),
        training_file_id=file_id,
        hyperparameters=hyperparameters,
        metadata=metadata,
    )
    job_id = (create_resp or {}).get("job_id")
    if not job_id:
        raise RuntimeError(f"create_sft_job missing job_id: {create_resp}")
    print(f"job_id={job_id}")
    save_state({"job_id": job_id})

    # Start job
    start_resp = await ft.start_job(job_id)
    print(f"start={start_resp}")

    # Poll until terminal with streaming event/metric logs
    def _on_event(e: dict[str, Any]) -> None:
        # Best-effort logging; never let a malformed event abort polling.
        try:
            seq = e.get("seq")
            etype = e.get("type") or e.get("event_type")
            msg = e.get("message")
            print(f"event seq={seq} type={etype} msg={msg}")
        except Exception:
            pass

    def _on_metric(p: dict[str, Any]) -> None:
        # Best-effort logging of training metric points.
        try:
            name = str(p.get("name") or "")
            step = p.get("step")
            epoch = p.get("epoch")
            val = p.get("value")
            print(f"metric {name} step={step} epoch={epoch} value={val}")
        except Exception:
            pass

    handle = JobHandle(base_url, api_key, job_id, strict=True)
    final = await handle.poll_until_terminal(
        interval_seconds=2.0,
        max_seconds=1800,
        on_event=_on_event,
        on_metric=_on_metric,
    )
    status = (final or {}).get("status")
    print(f"final_status={status}")
    ft_model = (final or {}).get("fine_tuned_model")
    if ft_model:
        print(f"fine_tuned_model={ft_model}")
        save_state({"fine_tuned_model": ft_model})

    # Optional: quick inference check
    if not args.no_infer:
        model_for_infer = ft_model or str(args.model)
        try:
            ic = InferenceClient(base_url=base_url, api_key=api_key, timeout=600.0)
            print(f"\nInference sanity check (model={model_for_infer})…")
            resp = await ic.create_chat_completion(
                model=model_for_infer,
                messages=[{"role": "user", "content": "Give me a cheerful two-line greeting."}],
                max_tokens=128,
                temperature=0.7,
                stream=False,
            )
            print(resp)
        except Exception as e:
            # Always print full error details and traceback
            import traceback
            try:
                from synth_ai.http import HTTPError  # type: ignore
            except Exception:  # pragma: no cover - fallback if import shape changes
                HTTPError = tuple()  # type: ignore
            print("\n===== Inference Error =====")
            print(f"Type: {type(e).__name__}")
            print(f"Repr: {repr(e)}")
            tb = traceback.format_exc()
            if tb:
                print("Traceback:")
                print(tb)
            # If HTTP error from backend, surface structured fields.
            # (Removed a dead always-no-op conditional that checked
            # `'HTTPError' in str(type(e)) or (isinstance((), tuple) and False)`
            # and then only executed `pass`.)
            try:
                if HTTPError and isinstance(e, HTTPError):  # type: ignore[arg-type]
                    print("HTTPError details:")
                    print(f" status={e.status}")
                    print(f" url={e.url}")
                    print(f" message={e.message}")
                    if getattr(e, 'detail', None) is not None:
                        print(f" detail={e.detail}")
                    if getattr(e, 'body_snippet', None):
                        print(f" body_snippet={e.body_snippet}")
            except Exception:
                pass
            print("===== End Inference Error =====\n")
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
def main() -> None:
    """CLI entry point: parse arguments and drive the async SFT workflow."""
    args = parse_args()
    asyncio.run(run(args))


if __name__ == "__main__":
    main()
|
|
@@ -1,34 +0,0 @@
|
|
|
1
|
-
from __future__ import annotations
|
|
2
|
-
|
|
3
|
-
from pathlib import Path
|
|
4
|
-
|
|
5
|
-
import asyncio
|
|
6
|
-
|
|
7
|
-
import sys
|
|
8
|
-
import os
|
|
9
|
-
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
|
|
10
|
-
|
|
11
|
-
from synth_ai.learning import FtClient, validate_training_jsonl
|
|
12
|
-
from examples.finetuning.synth_qwen_v1.util import load_env, save_state, parse_args
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
async def _run(mode: str | None) -> None:
    """Validate the local training JSONL and upload it, persisting the file id."""
    backend_url, api_key = load_env(mode)
    ft_client = FtClient(base_url=backend_url, api_key=api_key)

    dataset_path = Path(__file__).parent / "data" / "training.jsonl"
    # Shared validator from synth_ai.learning.validators keeps the rules in one place.
    validate_training_jsonl(dataset_path)
    uploaded_id = await ft_client.upload_training_file(dataset_path, purpose="fine-tune")
    print(f"file_id={uploaded_id}")
    save_state({"file_id": uploaded_id})
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
def main() -> None:
    """CLI entry point: parse args and run the async upload flow."""
    args = parse_args()
    asyncio.run(_run(args.mode))


if __name__ == "__main__":
    main()
|
|
34
|
-
|
|
@@ -1,147 +0,0 @@
|
|
|
1
|
-
from __future__ import annotations
|
|
2
|
-
|
|
3
|
-
import os
|
|
4
|
-
import json
|
|
5
|
-
import argparse
|
|
6
|
-
from pathlib import Path
|
|
7
|
-
from typing import Any, Dict
|
|
8
|
-
|
|
9
|
-
from synth_ai.config.base_url import get_backend_from_env
|
|
10
|
-
|
|
11
|
-
# python-dotenv is optional; when it is not installed, substitute a no-op
# loader so callers can unconditionally call load_dotenv().
try:
    from dotenv import load_dotenv  # type: ignore[reportMissingImports]
except Exception:  # pragma: no cover
    def load_dotenv(*args, **kwargs):  # type: ignore[no-redef]
        return False
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
STATE_PATH = Path(__file__).parent / "state.json"
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
def _default_backend_url() -> str:
    """Return the env-derived backend base URL, normalized to end in '/api'."""
    url, _key = get_backend_from_env()
    url = url.rstrip("/")
    if url.endswith("/api"):
        return url
    return f"{url}/api"
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
def load_env(mode: str | None = None) -> tuple[str, str]:
    """Resolve backend base_url and api_key.

    Precedence:
    - SYNTH_BACKEND_URL_OVERRIDE=local|dev|prod (preferred)
    - explicit mode arg (local|dev|prod)
    - default prod

    Returns:
        (base_url, api_key) with base_url stripped of any trailing slash.

    Raises:
        RuntimeError: when the required URL/key env vars for the chosen mode
            are missing.
    """
    load_dotenv()

    def _mask(key: str) -> str:
        # Show only the last 6 characters of the key.
        return ("*" * max(0, len(key) - 6)) + key[-6:] if key else "<empty>"

    def _key_source(env_name: str, key: str) -> str:
        # Best-effort: report which env var the key value matches ('' if unknown).
        candidates = {
            "prod": ("PROD_SYNTH_API_KEY", "TESTING_PROD_SYNTH_API_KEY", "SYNTH_API_KEY"),
            "dev": ("DEV_SYNTH_API_KEY",),
            "local": ("DEV_SYNTH_API_KEY", "TESTING_LOCAL_SYNTH_API_KEY"),
        }[env_name]
        if key:
            for var in candidates:
                if key == os.getenv(var, "").strip():
                    return var
        return ""

    def _print_key(env_name: str, key: str) -> None:
        # Print masked API key and source for clarity.
        src = _key_source(env_name, key)
        print(f"SYNTH api key: {_mask(key)} (len={len(key)}, src={src or '<unknown>'})")

    # Prefer global override if present
    override = (os.getenv("SYNTH_BACKEND_URL_OVERRIDE", "") or "").strip().lower()
    if override in {"local", "dev", "prod"}:
        base, key = get_backend_from_env()
        base = base.rstrip("/")
        print(f"SYNTH backend: {base} (override={override})")
        _print_key(override, key)
        return base, key

    # Fallback to explicit mode
    if mode is None:
        mode = os.getenv("SYNTH_MODE", "prod").strip().lower()
    if mode == "local":
        base_url = os.getenv("LOCAL_BACKEND_URL", "").strip()
        # Prefer DEV_SYNTH_API_KEY for local development; fall back to legacy var
        api_key = (
            os.getenv("DEV_SYNTH_API_KEY", "").strip()
            or os.getenv("TESTING_LOCAL_SYNTH_API_KEY", "").strip()
        )
        if not base_url or not api_key:
            raise RuntimeError("Missing LOCAL_BACKEND_URL or DEV_SYNTH_API_KEY/TESTING_LOCAL_SYNTH_API_KEY in environment/.env")
    elif mode == "dev":
        base_url = os.getenv("DEV_BACKEND_URL", "").strip()
        api_key = os.getenv("DEV_SYNTH_API_KEY", "").strip()
        if not base_url or not api_key:
            raise RuntimeError("Missing DEV_BACKEND_URL or DEV_SYNTH_API_KEY in environment/.env")
    else:  # prod
        base_url = os.getenv("PROD_BACKEND_URL", "").strip() or _default_backend_url()
        api_key = (
            os.getenv("PROD_SYNTH_API_KEY", "").strip()
            or os.getenv("TESTING_PROD_SYNTH_API_KEY", "").strip()
            or os.getenv("SYNTH_API_KEY", "").strip()
        )
        if not api_key:
            raise RuntimeError("Missing PROD_SYNTH_API_KEY/TESTING_PROD_SYNTH_API_KEY/SYNTH_API_KEY in environment/.env")
    base_url = base_url.rstrip("/")
    print(f"SYNTH backend: {base_url} (mode={mode})")
    # Preserve the original quirk: any mode other than prod/dev is reported
    # with the local key candidates, even though its config defaults to prod.
    _print_key(mode if mode in ("prod", "dev") else "local", api_key)
    return base_url, api_key
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
def parse_args() -> argparse.Namespace:
    """Parse the shared CLI flags used by the synth_qwen_v1 example scripts."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--mode",
        choices=["prod", "dev", "local"],
        default=None,
        help="Backend mode",
    )
    return parser.parse_args()
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
def save_state(obj: Dict[str, Any]) -> None:
    """Merge *obj* into the persisted JSON state file (new keys win).

    Corrupt or unreadable existing state is discarded rather than raising.
    """
    state: Dict[str, Any] = {}
    if STATE_PATH.exists():
        try:
            state = json.loads(STATE_PATH.read_text())
        except Exception:
            state = {}
    state.update(obj)
    STATE_PATH.write_text(json.dumps(state, indent=2))
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
def load_state() -> Dict[str, Any]:
    """Read the persisted JSON state; return {} when missing or unreadable."""
    try:
        # A missing file raises FileNotFoundError, which the except below
        # folds into the same empty-dict result as corrupt content.
        return json.loads(STATE_PATH.read_text())
    except Exception:
        return {}
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
def validate_jsonl(path: str | Path) -> None:
    """Backwards-compatible wrapper that delegates to shared SDK validator.

    Prefer synth_ai.learning.validators.validate_training_jsonl to keep a
    single source of JSONL validation rules used across examples and tests.
    """
    # Deferred import so this module does not hard-require the SDK at import time.
    from synth_ai.learning import validate_training_jsonl as _validate

    _validate(path)
|